[error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,crypto_sup} started: [{pid,<0.123.0>}, {name,crypto_server}, {mfargs,{crypto_server,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: crypto started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: public_key started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.129.0>}, {name,ftp_sup}, {mfargs,{ftp_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_profile_sup} started: [{pid,<0.132.0>}, {name,httpc_manager}, {mfargs, {httpc_manager,start_link, [default,only_session_cookies,inets]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_sup} started: [{pid,<0.131.0>}, {name,httpc_profile_sup}, {mfargs, {httpc_profile_sup,start_link, [[{httpc,{default,only_session_cookies}}]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_sup} started: [{pid,<0.133.0>}, {name,httpc_handler_sup}, {mfargs,{httpc_handler_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.130.0>}, {name,httpc_sup}, {mfargs, {httpc_sup,start_link, [[{httpc,{default,only_session_cookies}}]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.134.0>}, {name,httpd_sup}, {mfargs,{httpd_sup,start_link,[[]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.135.0>}, {name,tftp_sup}, {mfargs,{tftp_sup,start_link,[[]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] 
[nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: inets started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: oauth started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ssl_sup} started: [{pid,<0.141.0>}, {name,ssl_broker_sup}, {mfargs,{ssl_broker_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ssl_sup} started: [{pid,<0.142.0>}, {name,ssl_manager}, {mfargs,{ssl_manager,start_link,[[]]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ssl_sup} started: [{pid,<0.143.0>}, {name,ssl_connection}, {mfargs,{ssl_connection_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: ssl started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ibrowse_sup} started: [{pid,<0.148.0>}, {name,ibrowse}, {mfargs,{ibrowse,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: ibrowse started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: mochiweb started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: couch_set_view started_at: nonode@nohost [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.155.0>}, {name,couch_config}, {mfargs, {couch_server_sup,couch_config_start_link_wrapper, [["/opt/couchbase/etc/couchdb/default.ini", "/opt/couchbase/etc/couchdb/default.d/capi.ini", "/opt/couchbase/etc/couchdb/default.d/geocouch.ini", "/opt/couchbase/etc/couchdb/local.ini", "/opt/couchbase/etc/couchdb/local.d/mccouch.ini"], <0.155.0>]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,couch_primary_services} started: [{pid,<0.158.0>}, {name,collation_driver}, {mfargs,{couch_drv,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.159.0>}, {name,couch_task_events}, {mfargs, {gen_event,start_link,[{local,couch_task_events}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.160.0>}, {name,couch_task_status}, {mfargs,{couch_task_status,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.161.0>}, {name,couch_file_write_guard}, {mfargs,{couch_file_write_guard,sup_start_link,[]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.162.0>}, {name,couch_server}, {mfargs,{couch_server,sup_start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.163.0>}, {name,couch_compress_types}, {mfargs,{couch_compress_types,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.164.0>}, {name,couch_db_update_event}, {mfargs, {gen_event,start_link,[{local,couch_db_update}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.165.0>}, {name,couch_replication_event}, {mfargs, {gen_event,start_link,[{local,couch_replication}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.166.0>}, {name,couch_replication_supervisor}, {mfargs,{couch_rep_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,couch_primary_services} started: [{pid,<0.167.0>}, {name,couch_log}, {mfargs,{couch_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.168.0>}, {name,couch_main_index_barrier}, {mfargs, {couch_index_barrier,start_link, [couch_main_index_barrier, "max_parallel_indexers"]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.169.0>}, {name,couch_replica_index_barrier}, {mfargs, {couch_index_barrier,start_link, [couch_replica_index_barrier, "max_parallel_replica_indexers"]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.170.0>}, {name,couch_access_log}, {mfargs,{couch_access_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.157.0>}, {name,couch_primary_services}, {mfargs,{couch_primary_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.174.0>}, {name,couch_db_update_notifier_sup}, {mfargs,{couch_db_update_notifier_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.175.0>}, {name,auth_cache}, {mfargs,{couch_auth_cache,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.189.0>}, {name,os_daemons}, {mfargs,{couch_os_daemons,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.191.0>}, {name,mc_couch_events}, {mfargs, {gen_event,start_link,[{local,mc_couch_events}]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.192.0>}, {name,mc_tcp_listener}, {mfargs,{mc_tcp_listener,start_link,[11213]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.193.0>}, {name,mc_conn_sup}, {mfargs,{mc_conn_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.190.0>}, {name,mc_daemon}, {mfargs,{mc_sup,start_link,[11213]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [couchdb:info] [2012-03-26 1:02:59] [nonode@nohost:<0.192.0>:couch_log:info:39] mccouch is listening on port 11213 [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.194.0>}, {name,vhosts}, {mfargs,{couch_httpd_vhost,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.195.0>}, {name,set_view_manager}, {mfargs,{couch_set_view,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.197.0>}, {name,spatial_manager}, {mfargs,{couch_spatial,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.198.0>}, {name,query_servers}, {mfargs,{couch_query_servers,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.200.0>}, {name,view_manager}, {mfargs,{couch_view,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.202.0>}, {name,httpd}, {mfargs,{couch_httpd,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,couch_secondary_services} started: [{pid,<0.219.0>}, {name,external_manager}, {mfargs,{couch_external_manager,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.220.0>}, {name,uuids}, {mfargs,{couch_uuids,start,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [couchdb:info] [2012-03-26 1:02:59] [nonode@nohost:cb_couch_sup:couch_log:info:39] Apache CouchDB has started on http://0.0.0.0:8092/ [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.173.0>}, {name,couch_secondary_services}, {mfargs,{couch_secondary_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,cb_couch_sup} started: [{pid,<0.156.0>}, {name,couch_app}, {mfargs, {couch_app,start, [fake, ["/opt/couchbase/etc/couchdb/default.ini", "/opt/couchbase/etc/couchdb/local.ini"]]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.118.0>}, {name,cb_couch_sup}, {mfargs,{cb_couch_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,supervisor}] [error_logger:error] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,couch_primary_services} Context: child_terminated Reason: normal Offender: [{pid,<0.167.0>}, {name,couch_log}, {mfargs,{couch_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.222.0>}, {name,couch_log}, {mfargs,{couch_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [nonode@nohost:ns_server_cluster_sup:log_os_info:start_link:25] OS type: {unix, linux} Version: {2, 6, 18} Runtime info: [{otp_release,"R14B04"}, {erl_version,"5.8.5"}, {erl_version_long, "Erlang R14B04 (erts-5.8.5) [source] [64-bit] [smp:4:4] [rq:4] [async-threads:16] [hipe] [kernel-poll:true]\n"}, {system_arch_raw,"x86_64-unknown-linux-gnu"}, {system_arch,"x86_64-unknown-linux-gnu"}, {localtime,{{2012,3,26},{1,2,59}}}, {memory, [{total,23414768}, {processes,4227512}, {processes_used,4223584}, {system,19187256}, {atom,829953}, {atom_used,821354}, {binary,100752}, {code,7757096}, {ets,688768}]}, {loaded, [ns_info,log_os_info,couch_config_writer,cb_init_loggers, couch_external_manager,mochiweb_acceptor,mochiweb_socket, mochiweb_socket_server,mochilists,mochiweb_http,eval_bits, 
couch_view,couch_query_servers,couch_spatial, couch_set_view,couch_httpd,inet_tcp,couch_httpd_vhost, gen_tcp,mc_conn_sup,mc_tcp_listener,mc_sup, couch_os_daemons,snappy,couch_compress,ejson,couch_doc, couch_db_update_notifier,couch_btree,couch_ref_counter, couch_uuids,crypto,couch_db_updater,couch_db, couch_auth_cache,couch_db_update_notifier_sup, couch_secondary_sup,couch_access_log,couch_index_barrier, couch_event_sup,couch_log,couch_rep_sup, couch_compress_types,httpd_util,filelib,couch_file, couch_file_write_guard,couch_task_status,erl_ddll, couch_drv,couch_primary_sup,couch_server,string,re, couch_util,couch_config,couch_server_sup,mochiweb_sup, mochiweb_app,ibrowse,ibrowse_sup,ibrowse_app, ssl_connection_sup,ssl_session_cache,ssl_certificate_db, ssl_manager,ssl_broker_sup,ssl_sup,ssl_app,tftp_sup, httpd_sup,httpc_handler_sup,httpc_cookie,inets, httpc_manager,httpc,httpc_profile_sup,httpc_sup,ftp_sup, inets_sup,inets_app,ale_default_formatter,random, crypto_server,crypto_sup,crypto_app,couch_app, cb_couch_sup,ns_server_cluster_sup,'ale_logger-views', 'ale_logger-cluster','ale_logger-rebalance', 'ale_logger-stats','ale_logger-ns_doctor', 'ale_logger-menelaus','ale_logger-user', 'ale_logger-ns_server','ale_logger-couchdb',misc, ns_log_sink,disk_log_sup,disk_log_server,disk_log_1, disk_log,ale_disk_sink,ns_server,timer,io_lib_fread, cpu_sup,memsup,disksup,os_mon,io,sasl_report, release_handler,calendar,overload,alarm_handler,log_mf_h, sasl_report_tty_h,sasl,ale_error_logger_handler, 'ale_logger-ale_logger','ale_logger-error_logger', beam_opcodes,beam_dict,beam_asm,beam_validator, beam_flatten,beam_trim,beam_receive,beam_bsm,beam_peep, beam_dead,beam_type,beam_bool,beam_clean,beam_jump, beam_block,beam_utils,v3_codegen,v3_life,v3_kernel, sys_core_dsetel,erl_bifs,sys_core_fold,cerl_trees, sys_core_inline,core_lib,cerl,v3_core,erl_bits, erl_expand_records,sys_pre_expand,sofs,otp_internal, erl_internal,sets,erl_lint,compile,dynamic_compile, io_lib_pretty,io_lib_format,io_lib,ale_codegen,dict, ale_server,ale_utils,ordsets,ale,ale_dynamic_sup,ale_sup, ale_app,ns_bootstrap,file_io_server,orddict,erl_eval,c, error_logger_tty_h,queue,kernel_config,user,user_sup, supervisor_bridge,standard_error,ram_file,file,beam_lib, unicode,binary,ets,gb_sets,hipe_unified_loader,packages, code_server,code,file_server,net_kernel,global_group, erl_distribution,filename,inet_gethost_native,os, inet_parse,inet,inet_udp,inet_config,inet_db,global, gb_trees,rpc,supervisor,kernel,application_master,sys, application,gen_server,erl_parse,proplists,erl_scan,lists, application_controller,proc_lib,gen,gen_event, error_logger,heart,error_handler,erlang,erl_prim_loader, prim_zip,zlib,prim_file,prim_inet,init,otp_ring0]}, {applications, [{public_key,"Public key infrastructure","0.13"}, {ale,"Another Logger for Erlang","8cffe61"}, {os_mon,"CPO CXC 138 46","2.2.7"}, {couch_set_view,"Set views","1.2.0a-8bfbe08-git"}, {inets,"INETS CXC 138 49","5.7.1"}, {couch,"Apache CouchDB","1.2.0a-8bfbe08-git"}, {kernel,"ERTS CXC 138 10","2.14.5"}, {crypto,"CRYPTO version 2","2.0.4"}, {ssl,"Erlang/OTP SSL application","4.1.6"}, {sasl,"SASL CXC 138 11","2.1.10"}, {ns_server,"Couchbase server","2.0.0r-944-rel-enterprise"}, {mochiweb,"MochiMedia Web Server","1.4.1"}, {ibrowse,"HTTP client application","2.2.0"}, {oauth,"Erlang OAuth implementation","7d85d3ef"}, {stdlib,"ERTS CXC 138 10","1.17.5"}]}, {pre_loaded, [erlang,erl_prim_loader,prim_zip,zlib,prim_file,prim_inet, init,otp_ring0]}, {process_count,157}, {node,nonode@nohost}, {nodes,[]}, 
{registered, [couch_server_sup,sasl_safe_sup,couch_rep_sup,couch_log, couch_httpd,standard_error,ssl_connection_sup,httpd_sup, cb_couch_sup,couch_auth_cache,ssl_manager,timer_server, erl_prim_loader,sasl_sup,ssl_broker_sup,couch_server, ns_server_cluster_sup,ssl_sup,inet_db,error_logger, couch_external_manager,couch_config,rex,kernel_sup, global_name_server,httpc_sup,global_group,file_server_2, release_handler,httpc_profile_sup,httpc_manager, httpc_handler_sup,overload,os_mon_sup,ftp_sup, mc_couch_events,alarm_handler,mc_conn_sup,couch_set_view, cpu_sup,ale_sup,memsup,disksup,'sink-disk_default', standard_error_sup,disk_log_sup,inets_sup,disk_log_server, ale_dynamic_sup,crypto_server,crypto_sup,user, 'logger-ale_logger',code_server,ibrowse,mochiweb_sup, couch_spatial,application_controller, couch_secondary_services,ale,couch_primary_services, couch_db_update,mc_sup,couch_access_log,couch_os_daemons, 'logger-error_logger',init,couch_file_write_guard, couch_query_servers,'logger-views', couch_db_update_notifier_sup,ibrowse_sup,'logger-cluster', 'logger-rebalance',kernel_safe_sup,'logger-stats', couch_view,couch_uuids,'logger-ns_doctor',tftp_sup, 'logger-menelaus',couch_task_status, couch_replica_index_barrier,'logger-user', couch_main_index_barrier,'logger-ns_server', couch_compress_types,couch_drv,couch_replication, 'logger-couchdb','sink-ns_log',couch_task_events, 'sink-disk_couchdb',couch_httpd_vhost,'sink-disk_views', 'sink-disk_error']}, {cookie,nocookie}, {wordsize,8}, {wall_clock,1}] [ns_server:info] [2012-03-26 1:02:59] [nonode@nohost:ns_server_cluster_sup:log_os_info:start_link:27] Manifest: ["bucket_engine 2.0.0r-944-rel Linux-x86_64", "couchbase-examples 2.0.0r-944-rel Linux-x86_64", "couchbase-python-client 2.0.0r-944-rel Linux-x86_64", "couchbase-server 2.0.0r-944-rel Linux-x86_64", "couchdb 2.0.0r-944-rel Linux-x86_64", "couchdbx-app 2.0.0r-944-rel Linux-x86_64", "couchstore 2.0.0r-944-rel Linux-x86_64", "ep-engine 2.0.0r-944-rel Linux-x86_64", "geocouch 2.0.0r-944-rel Linux-x86_64","icu4c 2.0.0r-944-rel Linux-x86_64", "libconflate 2.0.0r-944-rel Linux-x86_64", "libcouchbase 2.0.0r-944-rel Linux-x86_64", "libmemcached 2.0.0r-944-rel Linux-x86_64", "libvbucket 2.0.0r-944-rel Linux-x86_64", "manifest 2.0.0r-944-rel Linux-x86_64", "manifest-master 2.0.0r-944-rel Linux-x86_64", "mccouch 2.0.0r-944-rel Linux-x86_64", "membase-cli 2.0.0r-944-rel Linux-x86_64", "memcached 2.0.0r-944-rel Linux-x86_64", "memcachetest 2.0.0r-944-rel Linux-x86_64", "moxi 2.0.0r-944-rel Linux-x86_64","ns_server 2.0.0r-944-rel Linux-x86_64", "otp 2.0.0r-944-rel Linux-x86_64","portsigar 2.0.0r-944-rel Linux-x86_64", "sigar 2.0.0r-944-rel Linux-x86_64","snappy 2.0.0r-944-rel Linux-x86_64", "spidermonkey 2.0.0r-944-rel Linux-x86_64", "testrunner 2.0.0r-944-rel Linux-x86_64","tlm 2.0.0r-944-rel Linux-x86_64", "v8 2.0.0r-944-rel Linux-x86_64", "workload-generator 2.0.0r-944-rel Linux-x86_64"] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.223.0>}, {name,timeout_diag_logger}, {mfargs,{timeout_diag_logger,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [nonode@nohost:dist_manager:dist_manager:read_address_config:52] reading ip config from "/opt/couchbase/var/lib/couchbase/ip" [ns_server:error] [2012-03-26 1:02:59] 
[nonode@nohost:dist_manager:dist_manager:read_address_config:58] Got error:einval. Ignoring bad address:[] [ns_server:info] [2012-03-26 1:02:59] [nonode@nohost:dist_manager:dist_manager:bringup:114] Attempting to bring up net_kernel with name 'ns_1@127.0.0.1' [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,net_sup} started: [{pid,<0.226.0>}, {name,erl_epmd}, {mfargs,{erl_epmd,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,net_sup} started: [{pid,<0.227.0>}, {name,auth}, {mfargs,{auth,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,net_sup} started: [{pid,<0.228.0>}, {name,net_kernel}, {mfargs, {net_kernel,start_link, [['ns_1@127.0.0.1',longnames]]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,kernel_sup} started: [{pid,<0.225.0>}, {name,net_sup_dynamic}, {mfargs, {erl_distribution,start_link, [['ns_1@127.0.0.1',longnames]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:dist_manager:dist_manager:save_node:81] saving node to "/opt/couchbase/var/lib/couchbase/couchbase-server.node" [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:dist_manager:dist_manager:bringup:122] Attempted to save node name to disk: ok [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.224.0>}, {name,dist_manager}, {mfargs,{dist_manager,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.231.0>}, {name,ns_cookie_manager}, {mfargs,{ns_cookie_manager,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.232.0>}, {name,ns_cluster}, {mfargs,{ns_cluster,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mb_mnesia_sup} started: [{pid,<0.234.0>}, {name,mb_mnesia_events}, {mfargs, {gen_event,start_link,[{local,mb_mnesia_events}]}}, {restart_type,permanent}, {shutdown,10}, {child_type,worker}] [error_logger:info] [2012-03-26 
1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_sup} started: [{pid,<0.241.0>}, {name,mnesia_event}, {mfargs,{mnesia_sup,start_event,[]}}, {restart_type,permanent}, {shutdown,30000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.243.0>}, {name,mnesia_monitor}, {mfargs,{mnesia_monitor,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.244.0>}, {name,mnesia_subscr}, {mfargs,{mnesia_subscr,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.245.0>}, {name,mnesia_locker}, {mfargs,{mnesia_locker,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.246.0>}, {name,mnesia_recover}, {mfargs,{mnesia_recover,start,[]}}, {restart_type,permanent}, {shutdown,180000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.247.0>}, {name,mnesia_tm}, {mfargs,{mnesia_tm,start,[]}}, {restart_type,permanent}, {shutdown,30000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.248.0>}, {name,mnesia_checkpoint_sup}, {mfargs,{mnesia_checkpoint_sup,start,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.249.0>}, {name,mnesia_snmp_sup}, {mfargs,{mnesia_snmp_sup,start,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.250.0>}, {name,mnesia_controller}, {mfargs,{mnesia_controller,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.251.0>}, {name,mnesia_late_loader}, 
{mfargs,{mnesia_late_loader,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_sup} started: [{pid,<0.242.0>}, {name,mnesia_kernel_sup}, {mfargs,{mnesia_kernel_sup,start,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: mnesia started_at: 'ns_1@127.0.0.1' [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,kernel_safe_sup} started: [{pid,<0.257.0>}, {name,dets_sup}, {mfargs,{dets_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,kernel_safe_sup} started: [{pid,<0.258.0>}, {name,dets}, {mfargs,{dets_server,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:ensure_schema:425] Committed schema to disk. [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:init:261] Current config: [{access_module,mnesia}, {auto_repair,true}, {backup_module,mnesia_backup}, {checkpoints,[]}, {db_nodes,['ns_1@127.0.0.1']}, {debug,verbose}, {directory,"/opt/couchbase/var/lib/couchbase/mnesia"}, {dump_log_load_regulation,false}, {dump_log_time_threshold,180000}, {dump_log_update_in_place,true}, {dump_log_write_threshold,1000}, {embedded_mnemosyne,false}, {event_module,mnesia_event}, {extra_db_nodes,[]}, {fallback_activated,false}, {held_locks,[]}, {ignore_fallback_at_startup,false}, {fallback_error_function,{mnesia,lkill}}, {is_running,yes}, {local_tables,[local_config,cluster,schema]}, {lock_queue,[]}, {log_version,"4.3"}, {master_node_tables,[]}, {max_wait_for_decision,10000}, {protocol_version,{8,0}}, {running_db_nodes,['ns_1@127.0.0.1']}, {schema_location,opt_disc}, {schema_version,{3,0}}, {subscribers,[<0.241.0>,<0.235.0>]}, {tables,[local_config,cluster,schema]}, {transaction_commits,5}, {transaction_failures,0}, {transaction_log_writes,6}, {transaction_restarts,0}, {transactions,[]}, {use_dir,true}, {core_dir,false}, {no_table_loaders,2}, {dc_dump_limit,4}, {send_compressed,0}, {version,"4.5"}] Peers: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,schema, [{name,schema}, {type,set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,false}, {record_name,schema}, {attributes,[table,cstruct]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,585312},'ns_1@127.0.0.1'}}, {version,{{3,0},{'ns_1@127.0.0.1',{1332,748979,617687}}}}]}, {tid,3,<0.254.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:210] Peers: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:210] 
Peers: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,local_config, [{name,local_config}, {type,set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,local_config}, {attributes,[key,val]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,706102},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,5,<0.277.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,local_config, [{name,local_config}, {type,set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,local_config}, {attributes,[key,val]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,706102},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,5,<0.277.0>}} [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mb_mnesia_sup} started: [{pid,<0.235.0>}, {name,mb_mnesia}, {mfargs,{mb_mnesia,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.233.0>}, {name,mb_mnesia_sup}, {mfargs,{mb_mnesia_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_sup:ns_config_sup:init:32] loading static ns_config from "/opt/couchbase/etc/couchbase/config" [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_config_sup} started: [{pid,<0.284.0>}, {name,ns_config_events}, {mfargs, {gen_event,start_link,[{local,ns_config_events}]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config_default:upgrade_config_from_1_6_to_1_7:292] Upgrading config from 1.6 to 1.7 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config:do_upgrade_config:455] Upgrading config by changes: [{set,{node,'ns_1@127.0.0.1',config_version},{1,7}}, {set, {node,'ns_1@127.0.0.1',memcached}, [{port,11210}, {mccouch_port,11213}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine,"/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {set, {node,'ns_1@127.0.0.1',ns_log}, [{filename,"/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {set, {node,'ns_1@127.0.0.1',isasl}, [{path,"/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {set,directory,"/opt/couchbase/var/lib/couchbase/config"}] [ns_server:info] [2012-03-26 1:02:59] 
[ns_1@127.0.0.1:ns_config:ns_config_default:upgrade_config_from_1_7_to_1_7_1:315] Upgrading config from 1.7 to 1.7.1 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config:do_upgrade_config:455] Upgrading config by changes: [{set,{node,'ns_1@127.0.0.1',config_version},{1,7,1}}, {set,email_alerts, [{recipients,["root@localhost"]}, {sender,"couchbase@localhost"}, {enabled,false}, {email_server,[{user,[]}, {pass,[]}, {host,"localhost"}, {port,25}, {encrypt,false}]}, {alerts,[auto_failover_node,auto_failover_maximum_reached, auto_failover_other_nodes_down, auto_failover_cluster_too_small]}]}, {set,auto_failover_cfg, [{enabled,false},{timeout,30},{max_nodes,1},{count,0}]}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config_default:upgrade_config_from_1_7_1_to_1_7_2:326] Upgrading config from 1.7.1 to 1.7.2 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config:do_upgrade_config:455] Upgrading config by changes: [{set,{node,'ns_1@127.0.0.1',config_version},{1,7,2}}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config_default:upgrade_config_from_1_7_2_to_1_8_0:380] Upgrading config from 1.7.2 to 1.8.0 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config:do_upgrade_config:455] Upgrading config by changes: [{set,{node,'ns_1@127.0.0.1',config_version},{1,8,0}}, {set,{node,'ns_1@127.0.0.1',port_servers}, [{moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR", {"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol,stderr_to_stdout, stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so", "-p", {"~B",[port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B", "binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env,[{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}]}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config_default:upgrade_config_from_1_8_0_to_2_0:417] Upgrading config from 1.7.2 to 2.0 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config:do_upgrade_config:455] Upgrading config by changes: [{set,{node,'ns_1@127.0.0.1',config_version},{2,0}}, {set,vbucket_map_history,[]}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_config_sup} started: [{pid,<0.285.0>}, {name,ns_config}, {mfargs, {ns_config,start_link, ["/opt/couchbase/etc/couchbase/config", ns_config_default]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config:ns_config:do_init:500] Upgraded initial config: {config, 
{full,"/opt/couchbase/etc/couchbase/config",undefined,ns_config_default}, [[], [{directory,"/opt/couchbase/var/lib/couchbase/config"}, {autocompaction, [{database_fragmentation_threshold,{30,nil}}, {view_fragmentation_threshold,{30,nil}}]}, {nodes_wanted,['ns_1@127.0.0.1']}, {{node,'ns_1@127.0.0.1',membership},active}, {rest,[{port,8091}]}, {max_parallel_indexers,4}, {{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]}, {{node,'ns_1@127.0.0.1',capi_port},8092}, {rest_creds,[{creds,[]}]}, {remote_clusters,[]}, {{node,'ns_1@127.0.0.1',isasl}, [{path,"/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@127.0.0.1',memcached}, [{port,11210}, {mccouch_port,11213}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine,"/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {memory_quota,3082}, {buckets,[{configs,[]}]}, {{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]}, {{node,'ns_1@127.0.0.1',port_servers}, [{moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol,stderr_to_stdout, stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-p", {"~B",[port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B", "binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status,port_server_send_eol, stream]}]}, {{node,'ns_1@127.0.0.1',ns_log}, [{filename,"/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {email_alerts, [{recipients,["root@localhost"]}, {sender,"couchbase@localhost"}, {enabled,false}, {email_server, [{user,[]}, {pass,[]}, {host,"localhost"}, {port,25}, {encrypt,false}]}, {alerts, [auto_failover_node,auto_failover_maximum_reached, auto_failover_other_nodes_down, auto_failover_cluster_too_small]}]}, {replication,[{enabled,true}]}, {auto_failover_cfg, [{enabled,false},{timeout,30},{max_nodes,1},{count,0}]}, {{node,'ns_1@127.0.0.1',uuid},<<"fbcfc41dd1c3c07ea3e219b758bd1503">>}]], [[{vbucket_map_history,[{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}]}, {directory, [{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}, 47,111,112,116,47,99,111,117,99,104,98,97,115,101,47,118,97,114,47, 108,105,98,47,99,111,117,99,104,98,97,115,101,47,99,111,110,102, 105,103]}, {{node,'ns_1@127.0.0.1',config_version}, [{'_vclock',[{'ns_1@127.0.0.1',{5,63499968179}}]}|{2,0}]}, {auto_failover_cfg, [{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}, 
{enabled,false}, {timeout,30}, {max_nodes,1}, {count,0}]}, {autocompaction, [{database_fragmentation_threshold,{30,nil}}, {view_fragmentation_threshold,{30,nil}}]}, {buckets,[{configs,[]}]}, {email_alerts, [{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}, {recipients,["root@localhost"]}, {sender,"couchbase@localhost"}, {enabled,false}, {email_server, [{user,[]}, {pass,[]}, {host,"localhost"}, {port,25}, {encrypt,false}]}, {alerts, [auto_failover_node,auto_failover_maximum_reached, auto_failover_other_nodes_down, auto_failover_cluster_too_small]}]}, {max_parallel_indexers,4}, {memory_quota,3082}, {nodes_wanted,['ns_1@127.0.0.1']}, {remote_clusters,[]}, {replication,[{enabled,true}]}, {rest,[{port,8091}]}, {rest_creds,[{creds,[]}]}, {{node,'ns_1@127.0.0.1',capi_port},8092}, {{node,'ns_1@127.0.0.1',isasl}, [{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}, {path,"/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@127.0.0.1',membership},active}, {{node,'ns_1@127.0.0.1',memcached}, [{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}, {port,11210}, {mccouch_port,11213}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine,"/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]}, {{node,'ns_1@127.0.0.1',ns_log}, [{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}, {filename,"/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {{node,'ns_1@127.0.0.1',port_servers}, [{'_vclock',[{'ns_1@127.0.0.1',{1,63499968179}}]}, {moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol,stderr_to_stdout, stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-p", {"~B",[port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B", "binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status,port_server_send_eol, stream]}]}, {{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]}, {{node,'ns_1@127.0.0.1',uuid},<<"fbcfc41dd1c3c07ea3e219b758bd1503">>}]], ns_config_default, {ns_config,save_config_sync,[]}, undefined,false} [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_config_sup} started: [{pid,<0.288.0>}, {name,ns_config_remote}, {mfargs, 
{ns_config_replica,start_link, [{local,ns_config_remote}]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:init:49] isasl_sync init: ["/opt/couchbase/var/lib/couchbase/data/isasl.pw", "_admin", "_admin"] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:init:57] isasl_sync init buckets: [] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_config_sup} started: [{pid,<0.289.0>}, {name,ns_config_isasl_sync}, {mfargs,{ns_config_isasl_sync,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_config_sup} started: [{pid,<0.292.0>}, {name,ns_config_log}, {mfargs,{ns_config_log,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_config_sup} started: [{pid,<0.293.0>}, {name,cb_config_couch_sync}, {mfargs,{cb_config_couch_sync,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.283.0>}, {name,ns_config_sup}, {mfargs,{ns_config_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_log:ns_log:init:64] Couldn't load logs from "/opt/couchbase/var/lib/couchbase/data/ns_log": {error, enoent} [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.300.0>}, {name,ns_log}, {mfargs,{ns_log,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.301.0>}, {name,ns_log_events}, {mfargs,{gen_event,start_link,[{local,ns_log_events}]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_node_disco_sup} started: [{pid,<0.303.0>}, {name,ns_node_disco_events}, {mfargs, {gen_event,start_link, [{local,ns_node_disco_events}]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_node_disco:ns_node_disco:init:98] Initting ns_node_disco with [] 
[ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_sync:115] ns_cookie_manager do_cookie_sync [user:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_init:91] Initial otp cookie generated: ryzltvzxtkwdptgs [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: otp -> [{cookie,ryzltvzxtkwdptgs}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_save:152] saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie" [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_save:154] attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie": ok [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.305.0>:ns_node_disco:do_nodes_wanted_updated_fun:183] ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: ryzltvzxtkwdptgs [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.305.0>:ns_node_disco:do_nodes_wanted_updated_fun:189] ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: ryzltvzxtkwdptgs [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_node_disco_sup} started: [{pid,<0.304.0>}, {name,ns_node_disco}, {mfargs,{ns_node_disco,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_node_disco_sup} started: [{pid,<0.308.0>}, {name,ns_node_disco_log}, {mfargs,{ns_node_disco_log,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_node_disco_sup} started: [{pid,<0.309.0>}, {name,ns_node_disco_conf_events}, {mfargs,{ns_node_disco_conf_events,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:init:56] init pulling [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:init:58] init pushing [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:init:62] init reannouncing [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:50] ns_node_disco_conf_events config on otp [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: otp -> [{cookie,ryzltvzxtkwdptgs}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: vbucket_map_history -> [] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_sync:115] ns_cookie_manager do_cookie_sync [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: directory -> "/opt/couchbase/var/lib/couchbase/config" [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config 
change: {node,'ns_1@127.0.0.1',config_version} -> {2,0} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: auto_failover_cfg -> [{enabled,false},{timeout,30},{max_nodes,1},{count,0}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_save:152] saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie" [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: autocompaction -> [{database_fragmentation_threshold,{30,nil}}, {view_fragmentation_threshold,{30,nil}}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: email_alerts -> [{recipients,["root@localhost"]}, {sender,"couchbase@localhost"}, {enabled,false}, {email_server,[{user,[]}, {pass,[]}, {host,"localhost"}, {port,25}, {encrypt,false}]}, {alerts,[auto_failover_node,auto_failover_maximum_reached, auto_failover_other_nodes_down,auto_failover_cluster_too_small]}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: max_parallel_indexers -> 4 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: memory_quota -> 3082 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:44] ns_node_disco_conf_events config on nodes_wanted [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: nodes_wanted -> ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: remote_clusters -> [] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: replication -> [{enabled,true}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: rest -> [{port,8091}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:44] config change: rest_creds -> ******** [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',capi_port} -> 8092 [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',isasl} -> [{path,"/opt/couchbase/var/lib/couchbase/data/isasl.pw"}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',membership} -> active [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',memcached} -> [{port,11210}, {mccouch_port,11213}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine,"/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}] 
[ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',moxi} -> [{port,11211},{verbosity,[]}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',ns_log} -> [{filename,"/opt/couchbase/var/lib/couchbase/data/ns_log"}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',port_servers} -> [{moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-p", {"~B",[port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B", "binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env,[{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status,port_server_send_eol, stream]}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',rest} -> [{port,8091},{port_meta,global}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: {node,'ns_1@127.0.0.1',uuid} -> <<"fbcfc41dd1c3c07ea3e219b758bd1503">> [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_node_disco_sup} started: [{pid,<0.310.0>}, {name,ns_config_rep}, {mfargs,{ns_config_rep,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.302.0>}, {name,ns_node_disco_sup}, {mfargs,{ns_node_disco_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.315.0>}, {name,ns_tick_event}, {mfargs,{gen_event,start_link,[{local,ns_tick_event}]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 
1:02:59] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_save:154] attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie": ok [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_sync:115] ns_cookie_manager do_cookie_sync [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.312.0>:ns_node_disco:do_nodes_wanted_updated_fun:183] ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: ryzltvzxtkwdptgs [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.312.0>:ns_node_disco:do_nodes_wanted_updated_fun:189] ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: ryzltvzxtkwdptgs [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_save:152] saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie" [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_cookie_manager:ns_cookie_manager:do_cookie_save:154] attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie": ok [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.313.0>:ns_node_disco:do_nodes_wanted_updated_fun:183] ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: ryzltvzxtkwdptgs [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.313.0>:ns_node_disco:do_nodes_wanted_updated_fun:189] ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: ryzltvzxtkwdptgs [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_master:mb_master:init:88] I'm the only node, so I'm the master. [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_master_sup:misc:start_singleton:812] start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.322.0> on 'ns_1@127.0.0.1' [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mb_master_sup} started: [{pid,<0.322.0>}, {name,ns_orchestrator}, {mfargs,{ns_orchestrator,start_link,[]}}, {restart_type,permanent}, {shutdown,20}, {child_type,worker}] [ns_doctor:error] [2012-03-26 1:02:59] [ns_1@127.0.0.1:cb_replication:ns_doctor:get_nodes:153] Error attempting to get nodes: {exit, {noproc, {gen_server, call, [ns_doctor, get_nodes]}}} [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mb_master_sup} started: [{pid,<0.323.0>}, {name,cb_replication}, {mfargs,{cb_replication,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_master_sup:misc:start_singleton:812] start_singleton(gen_server, ns_tick, [], []): started as <0.325.0> on 'ns_1@127.0.0.1' [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mb_master_sup} started: [{pid,<0.325.0>}, {name,ns_tick}, {mfargs,{ns_tick,start_link,[]}}, {restart_type,permanent}, {shutdown,10}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.326.0>:auto_failover:init:120] init auto_failover. 
[error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mb_master_sup} started: [{pid,<0.326.0>}, {name,auto_failover}, {mfargs,{auto_failover,start_link,[]}}, {restart_type,permanent}, {shutdown,10}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_master_sup:misc:start_singleton:812] start_singleton(gen_server, auto_failover, [], []): started as <0.326.0> on 'ns_1@127.0.0.1' [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.318.0>}, {name,mb_master}, {mfargs,{mb_master,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.327.0>}, {name,buckets_events}, {mfargs, {gen_event,start_link,[{local,buckets_events}]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_mail_sup} started: [{pid,<0.329.0>}, {name,ns_mail}, {mfargs,{ns_mail,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_mail_sup} started: [{pid,<0.330.0>}, {name,ns_mail_log}, {mfargs,{ns_mail_log,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.328.0>}, {name,ns_mail_sup}, {mfargs,{ns_mail_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_log_events:ns_mail_log:init:45] ns_mail_log started up [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.331.0>}, {name,ns_stats_event}, {mfargs, {gen_event,start_link,[{local,ns_stats_event}]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.332.0>}, {name,ns_heart}, {mfargs,{ns_heart,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.334.0>}, {name,ns_doctor}, {mfargs,{ns_doctor,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.339.0>}, {name,menelaus_web}, {mfargs,{menelaus_web,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.356.0>}, {name,menelaus_event}, {mfargs,{menelaus_event,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.357.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_doctor:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:129] Got initial status [{'ns_1@127.0.0.1', [{last_heard, {1332, 748979, 816833}}, {outgoing_replications_safeness_level, []}, {incoming_replications_conf_hashes, []}, {replication, []}, {active_buckets, []}, {ready_buckets, []}, {local_tasks, []}, {memory, [{total, 28441144}, {processes, 5322080}, {processes_used, 5309632}, {system, 23119064}, {atom, 1121209}, {atom_used, 1115965}, {binary, 81360}, {code, 11109017}, {ets, 886432}]}, {system_stats, [{cpu_utilization_rate, 0}, {swap_total, 0}, {swap_used, 0}]}, {interesting_stats, []}, {cluster_compatibility_version, 1}, {version, [{public_key, "0.13"}, {ale, "8cffe61"}, {os_mon, "2.2.7"}, {couch_set_view, "1.2.0a-8bfbe08-git"}, {mnesia, "4.5"}, {inets, "5.7.1"}, {couch, "1.2.0a-8bfbe08-git"}, {kernel, "2.14.5"}, {crypto, "2.0.4"}, {ssl, "4.1.6"}, {sasl, "2.1.10"}, {ns_server, "2.0.0r-944-rel-enterprise"}, {mochiweb, "1.4.1"}, {ibrowse, "2.2.0"}, {oauth, "7d85d3ef"}, {stdlib, "1.17.5"}]}, {system_arch, "x86_64-unknown-linux-gnu"}, {wall_clock, 1}, {memory_data, {4040077312, 4013039616, {<0.7.0>, 142856}}}, {disk_data, [{"/", 55007284, 100}, {"/boot", 101086, 21}, {"/dev/shm", 1972692, 0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25328 kB\nBuffers: 26588 kB\nCached: 3662696 kB\nSwapCached: 0 kB\nActive: 204820 kB\nInactive: 3549144 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25328 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 872 kB\nWriteback: 0 kB\nAnonPages: 64280 kB\nMapped: 22872 kB\nSlab: 134548 kB\nPageTables: 5872 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 268976 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory, 4040077312}, {free_swap, 6140350464}, {total_swap, 6140452864}, {cached_memory, 3750600704}, {buffered_memory, 27226112}, {free_memory, 25935872}, {total_memory, 4040077312}]}, {node_storage_conf, [{db_path, "/opt/couchbase/var/lib/couchdb"}, {index_path, "/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock, {1533, 329}}, {context_switches, {25557, 0}}, {garbage_collection, {10075, 16794995, 0}}, {io, {{input, 9037886}, {output, 1863857}}}, {reductions, {9883010, 9883010}}, {run_queue, 0}, {runtime, 
{1680, 1680}}]}]}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.360.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.338.0>}, {name,menelaus}, {mfargs,{menelaus_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [user:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:ns_server_sup:menelaus_sup:start_link:44] Couchbase Server has started on web port 8091 on node 'ns_1@127.0.0.1'. [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.362.0>}, {name,ns_port_init}, {mfargs,{ns_port_init,start_link,[]}}, {restart_type,permanent}, {shutdown,10}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.363.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.363.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR",[]}, {"MOXI_SASL_PLAIN_PWD",[]}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR",[]}, {"MOXI_SASL_PLAIN_PWD",[]}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.365.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.365.0>}, {name, {memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-p","11210","-E", "/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", 
"admin=_admin;default_bucket_name=default;auto_create=false", []], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE", "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}}, {mfargs, {erlang,apply, [#Fun, [memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-p","11210","-E", "/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", "admin=_admin;default_bucket_name=default;auto_create=false", []], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE", "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]]]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.361.0>}, {name,ns_port_sup}, {mfargs,{ns_port_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.367.0>}, {name,ns_port_memcached_killer}, {mfargs,{ns_port_sup,start_memcached_force_killer,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.369.0>}, {name,ns_bucket_worker}, {mfargs,{work_queue,start_link,[ns_bucket_worker]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.370.0>}, {name,xdc_rep_manager}, {mfargs,{xdc_rep_manager,start_link,[]}}, {restart_type,permanent}, {shutdown,30000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.386.0>}, {name,buckets_observing_subscription}, {mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.385.0>}, {name,ns_bucket_sup}, {mfargs,{ns_bucket_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.387.0>}, {name,system_stats_collector}, {mfargs,{system_stats_collector,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.389.0>}, {name,{stats_archiver,"@system"}}, {mfargs,{stats_archiver,start_link,["@system"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-@system-minute' [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.391.0>}, {name,{stats_reader,"@system"}}, {mfargs,{stats_reader,start_link,["@system"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.392.0>}, {name,ns_moxi_sup_work_queue}, {mfargs, {work_queue,start_link,[ns_moxi_sup_work_queue]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:<0.397.0>:supervisor_cushion:init:43] starting couchbase_compaction_daemon with delay of 3000 [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.394.0>}, {name,ns_moxi_sup}, {mfargs,{ns_moxi_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.397.0>}, {name,couchbase_compaction_daemon}, {mfargs, {supervisor_cushion,start_link, [couchbase_compaction_daemon,3000, couchbase_compaction_daemon,start_link,[]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-minute', [{name,'stats_archiver-@system-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,902296},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,6,<0.396.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-minute', [{name,'stats_archiver-@system-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,902296},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,6,<0.396.0>}} [ns_server:info] [2012-03-26 1:02:59] 
[ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-@system-hour' [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.411.0>}, {name,xdc_rdoc_replication_srv}, {mfargs,{xdc_rdoc_replication_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.299.0>}, {name,ns_server_sup}, {mfargs,{ns_server_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: ns_server started_at: 'ns_1@127.0.0.1' [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-hour', [{name,'stats_archiver-@system-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,913776},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,7,<0.406.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-hour', [{name,'stats_archiver-@system-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,913776},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,7,<0.406.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-@system-day' [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-day', [{name,'stats_archiver-@system-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,925547},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,8,<0.415.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-day', [{name,'stats_archiver-@system-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,925547},'ns_1@127.0.0.1'}}, 
{version,{{2,0},[]}}]}, {tid,8,<0.415.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-@system-week' [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-week', [{name,'stats_archiver-@system-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,931055},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,9,<0.422.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-week', [{name,'stats_archiver-@system-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,931055},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,9,<0.422.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-@system-month' [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-month', [{name,'stats_archiver-@system-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,935775},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,10,<0.429.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-month', [{name,'stats_archiver-@system-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,935775},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,10,<0.429.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-@system-year' [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-@system-year', [{name,'stats_archiver-@system-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,940704},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,11,<0.436.0>}} [ns_server:info] [2012-03-26 1:02:59] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: 
{write,{schema,'stats_archiver-@system-year', [{name,'stats_archiver-@system-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748979,940704},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,11,<0.436.0>}} [error_logger:info] [2012-03-26 1:03:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inet_gethost_native_sup} started: [{pid,<0.446.0>},{mfa,{inet_gethost_native,init,[[]]}}] [error_logger:info] [2012-03-26 1:03:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,kernel_safe_sup} started: [{pid,<0.445.0>}, {name,inet_gethost_native_sup}, {mfargs,{inet_gethost_native,start_link,[]}}, {restart_type,temporary}, {shutdown,1000}, {child_type,worker}] [user:info] [2012-03-26 1:03:02] [ns_1@127.0.0.1:<0.444.0>:menelaus_web_alerts_srv:global_alert:64] Approaching full disk warning. Usage of disk "/" on node "127.0.0.1" is around 100%. [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_port_sup:terminate_port:129] unsupervising port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", []}, {"MOXI_SASL_PLAIN_PWD", []}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.364.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.348.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.364.0>:ns_port_server:log:166] moxi<0.364.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_port_sup:launch_port:74] supervising port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.452.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.452.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: rest -> [{port,8091}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:44] config change: rest_creds -> ******** [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:04] 
[ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: memory_quota -> 3082 [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.453.0>:ns_port_server:log:166] moxi<0.453.0>: 2012-03-26 01:03:04: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.453.0>: 2012-03-26 01:03:04: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [menelaus:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.344.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.474.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.354.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.353.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.480.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.484.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.491.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.492.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.493.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.496.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.498.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-default-minute' [error_logger:info] [2012-03-26 1:03:04] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.500.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.503.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.479.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-minute', [{name,'stats_archiver-default-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,799528},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,22,<0.502.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-minute', [{name,'stats_archiver-default-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,799528},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,22,<0.502.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-default-hour' [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.506.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-hour', [{name,'stats_archiver-default-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,806578},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,23,<0.521.0>}} [ns_server:info] 
[2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-hour', [{name,'stats_archiver-default-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,806578},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,23,<0.521.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-default-day' [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-day', [{name,'stats_archiver-default-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,812446},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,24,<0.531.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-day', [{name,'stats_archiver-default-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,812446},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,24,<0.531.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-default-week' [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening 
vb 28 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-week', [{name,'stats_archiver-default-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,825742},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,25,<0.574.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-week', [{name,'stats_archiver-default-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,825742},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,25,<0.574.0>}} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-default-month' [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-month', [{name,'stats_archiver-default-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,832512},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,26,<0.592.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-month', [{name,'stats_archiver-default-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,832512},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,26,<0.592.0>}} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating 
table 'stats_archiver-default-year' [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-year', [{name,'stats_archiver-default-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,843375},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,27,<0.634.0>}} [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-default-year', [{name,'stats_archiver-default-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,748984,843375},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,27,<0.634.0>}} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 
1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 146 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in 
<<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 
246 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found, no_db_file} [views:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.534.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:<0.453.0>:ns_port_server:log:166] moxi<0.453.0>: 2012-03-26 01:03:06: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.453.0>: "name": "default", moxi<0.453.0>: "nodeLocator": "vbucket", moxi<0.453.0>: "saslPassword": "", moxi<0.453.0>: "nodes": [{ moxi<0.453.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.453.0>: "replication": 0, moxi<0.453.0>: "clusterMembership": "active", moxi<0.453.0>: "status": "warmup", moxi<0.453.0>: "thisNode": true, moxi<0.453.0>: "hostname": "127.0.0.1:8091", moxi<0.453.0>: "clusterCompatibility": 1, moxi<0.453.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.453.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.453.0>: "ports": { moxi<0.453.0>: "proxy": 11211, moxi<0.453.0>: "direct": 11210 moxi<0.453.0>: } moxi<0.453.0>: }], moxi<0.453.0>: "vBucketServerMap": { moxi<0.453.0>: "hashAlgorithm": "CRC", moxi<0.453.0>: 
"numReplicas": 1, moxi<0.453.0>: "serverList": ["127.0.0.1:11210"], moxi<0.453.0>: "vBucketMap": [] moxi<0.453.0>: } moxi<0.453.0>: }) moxi<0.453.0>: 2012-03-26 01:03:06: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.453.0>: "name": "default", moxi<0.453.0>: "nodeLocator": "vbucket", moxi<0.453.0>: "saslPassword": "", moxi<0.453.0>: "nodes": [{ moxi<0.453.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.453.0>: "replication": 0, moxi<0.453.0>: "clusterMembership": "active", moxi<0.453.0>: "status": "warmup", moxi<0.453.0>: "thisNode": true, moxi<0.453.0>: "hostname": "127.0.0.1:8091", moxi<0.453.0>: "clusterCompatibility": 1, moxi<0.453.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.453.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.453.0>: "ports": { moxi<0.453.0>: "proxy": 11211, moxi<0.453.0>: "direct": 11210 moxi<0.453.0>: } moxi<0.453.0>: }], moxi<0.453.0>: "vBucketServerMap": { moxi<0.453.0>: "hashAlgorithm": "CRC", moxi<0.453.0>: "numReplicas": 1, moxi<0.453.0>: "serverList": ["127.0.0.1:11210"], moxi<0.453.0>: "vBucketMap": [] moxi<0.453.0>: } moxi<0.453.0>: }) [ns_server:info] [2012-03-26 1:03:04] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Warning: Data diretory does not exist, /opt/couchbase/var/lib/couchdb/default memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/default memcached<0.366.0>: metadata loaded in 204 usec memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/default memcached<0.366.0>: warmup completed in 299 usec [user:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:handle_info:310] Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:207] The following buckets became ready on node 'ns_1@127.0.0.1': ["default"] [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[views:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]}, {passive,[]}, {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {replica,[]}] [views:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.453.0>:ns_port_server:log:166] moxi<0.453.0>: 2012-03-26 01:03:07: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.453.0>: "name": "default", moxi<0.453.0>: "nodeLocator": "vbucket", moxi<0.453.0>: "saslPassword": "", moxi<0.453.0>: "nodes": [{ moxi<0.453.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.453.0>: "replication": 0, moxi<0.453.0>: "clusterMembership": "active", moxi<0.453.0>: "status": "healthy", moxi<0.453.0>: "thisNode": true, moxi<0.453.0>: "hostname": "127.0.0.1:8091", moxi<0.453.0>: "clusterCompatibility": 1, moxi<0.453.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.453.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.453.0>: "ports": { moxi<0.453.0>: "proxy": 11211, moxi<0.453.0>: "direct": 11210 moxi<0.453.0>: } moxi<0.453.0>: }], moxi<0.453.0>: "vBucketServerMap": { moxi<0.453.0>: "hashAlgorithm": "CRC", moxi<0.453.0>: "numReplicas": 1, moxi<0.453.0>: "serverList": ["127.0.0.1:11210"], moxi<0.453.0>: "vBucketMap": [] moxi<0.453.0>: } moxi<0.453.0>: }) [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}, {map,[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], 
['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...]}]}]}] [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: vbucket_map_history -> [{[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], 
['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...], [{max_slaves,10}]}] [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 6 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 7 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 8 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 9 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 10 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 11 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 12 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 13 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 14 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 15 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 16 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 17 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 18 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 19 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 20 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 21 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 22 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 23 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 24 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 25 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 26 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 27 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 28 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 29 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 30 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 31 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 32 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 33 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 34 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 35 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 36 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 37 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 38 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 39 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 40 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 41 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 42 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 43 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 44 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 45 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 46 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 47 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 48 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 49 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 50 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 51 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 52 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 53 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 54 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 55 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 56 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 57 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 58 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 59 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 60 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 61 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 62 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 63 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 64 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 65 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 66 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 67 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 68 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 69 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 70 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 71 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 72 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 73 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 74 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 75 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 76 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 77 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 78 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 79 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 80 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 81 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 82 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 83 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 84 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 85 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 86 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 87 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 88 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 89 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 90 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 91 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 92 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 93 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 94 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 95 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 96 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 97 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 98 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 99 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 100 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[stats:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.496.0>:stats_collector:handle_info:84] Stats for bucket "default": accepting_conns 1 auth_cmds 0 auth_errors 0 bucket_active_conns 1 bucket_conns 2 bytes 2428850 bytes_read 2351 bytes_written 2561 cas_badval 0 cas_hits 0 cas_misses 0 cmd_flush 0 cmd_get 0 cmd_set 0 conn_yields 0 connection_structures 0 curr_connections 12 curr_items 0 curr_items_tot 0 daemon_connections 10 decr_hits 0 decr_misses 0 delete_hits 0 delete_misses 0 ep_allow_data_loss_during_shutdown 1 ep_alog_block_size 4096 ep_alog_path ep_alog_sleep_time 30 ep_backend couchdb ep_bg_fetch_delay 0 ep_bg_fetched 0 ep_cache_size 0 ep_chk_max_items 30000 ep_chk_period 3600 ep_chk_remover_stime 5 ep_commit_num 0 ep_commit_time 0 ep_commit_time_total 0 ep_concurrentDB 1 ep_config_file ep_couch_bucket default ep_couch_default_batch_size 500 ep_couch_host localhost ep_couch_port 11213 ep_couch_reconnect_sleeptime 250 ep_couch_response_timeout 600000 ep_couch_vbucket_batch_count 4 ep_data_age 0 ep_data_age_highwat 0 ep_db_cleaner_status running ep_db_shards 4 ep_db_strategy multiDB ep_dbinit 0 ep_dbname /opt/couchbase/var/lib/couchdb/default ep_degraded_mode 0 ep_diskqueue_drain 0 ep_diskqueue_fill 0 ep_diskqueue_items 0 ep_diskqueue_memory 0 ep_diskqueue_pending 0 ep_exp_pager_stime 3600 ep_exp_pager_stime 3600 ep_expired 0 ep_expiry_window 3 ep_failpartialwarmup 0 ep_flush_all false ep_flush_duration 0 ep_flush_duration_highwat 0 ep_flush_duration_total 0 ep_flush_preempts 0 ep_flusher_state initializing ep_flusher_todo 0 ep_getl_default_timeout 15 ep_getl_max_timeout 30 ep_ht_locks 5 ep_ht_size 3079 ep_inconsistent_slave_chk 0 ep_initfile ep_io_num_read 0 ep_io_num_write 0 ep_io_read_bytes 0 ep_io_write_bytes 0 ep_item_begin_failed 0 ep_item_commit_failed 0 ep_item_flush_expired 0 ep_item_flush_failed 0 ep_item_num_based_new_chk 1 ep_items_rm_from_checkpoints 0 ep_keep_closed_chks 0 ep_klog_block_size 4096 ep_klog_compactor_queue_cap 500000 ep_klog_compactor_stime 3600 ep_klog_flush commit2 ep_klog_max_entry_ratio 10 ep_klog_max_log_size 2147483647 ep_klog_path ep_klog_sync commit2 ep_kv_size 0 ep_latency_arith_cmd 0 ep_latency_get_cmd 0 ep_max_checkpoints 2 ep_max_data_size 209715200 ep_max_item_size 20971520 ep_max_size 209715200 ep_max_txn_size 10000 ep_max_vbuckets 256 ep_mem_high_wat 157286400 ep_mem_high_wat 18446744073709551615 ep_mem_low_wat 125829120 ep_mem_low_wat 18446744073709551615 ep_min_data_age 0 ep_mlog_compactor_runs 0 ep_mutation_mem_threshold 0 ep_num_active_non_resident 0 ep_num_checkpoint_remover_runs 0 ep_num_eject_failures 0 ep_num_eject_replicas 0 ep_num_expiry_pager_runs 0 ep_num_non_resident 0 ep_num_not_my_vbuckets 0 ep_num_pager_runs 0 ep_num_value_ejects 0 ep_obs_reg_clean_job 0 ep_observe_calls 0 ep_observe_errors 0 ep_observe_registry_size 0 ep_oom_errors 0 ep_overhead 2033034 ep_pending_ops 0 ep_pending_ops_max 0 ep_pending_ops_max_duration 0 ep_pending_ops_total 0 ep_postInitfile ep_queue_age_cap 900 ep_queue_size 0 ep_restore_file_checks 1 ep_restore_mode 0 ep_shardpattern %d/%b-%i.sqlite ep_stats_observe_polls 0 ep_storage_age 0 ep_storage_age_highwat 0 ep_storage_type featured ep_store_max_concurrency 10 ep_store_max_readers 9 ep_store_max_readwrite 1 ep_stored_val_type ep_tap_ack_grace_period 300 ep_tap_ack_initial_sequence_number 1 ep_tap_ack_interval 1000 ep_tap_ack_window_size 10 ep_tap_backfill_resident 0.9 ep_tap_backlog_limit 5000 ep_tap_backoff_period 5 ep_tap_bg_fetch_requeued 0 ep_tap_bg_fetched 0 ep_tap_bg_max_pending 500 
ep_tap_conn_map_notifications 0 ep_tap_keepalive 300 ep_tap_noop_interval 20 ep_tap_requeue_sleep_time 0.1 ep_tap_throttle_queue_cap 1000000 ep_tap_throttle_threshold 90 ep_tmp_item_expiry_window 3600 ep_tmp_oom_errors 0 ep_too_old 0 ep_too_young 0 ep_total_cache_size 0 ep_total_del_items 0 ep_total_enqueued 0 ep_total_new_items 0 ep_total_observe_sets 0 ep_total_persisted 0 ep_uncommitted_items 0 ep_unobserve_calls 0 ep_value_size 0 ep_vb0 0 ep_vb_chunk_del_time 500 ep_vb_del_chunk_size 100 ep_vb_total 77 ep_vbucket_del 0 ep_vbucket_del_fail 0 ep_version 1.8.1r_467_g931cd34 ep_waitforwarmup 0 ep_warmup 1 ep_warmup_batch_size 1000 ep_warmup_min_items_threshold 10 ep_warmup_min_memory_threshold 10 get_hits 0 get_misses 0 incr_hits 0 incr_misses 0 libevent 2.0.11-stable limit_maxbytes 67108864 listen_disabled_num 0 mem_used 2428850 pid 2631 pointer_size 64 rejected_conns 0 rusage_system 0.129980 rusage_user 0.967852 threads 4 time 1332748984 total_connections 12 uptime 7 vb_active_curr_items 0 vb_active_eject 0 vb_active_ht_memory 1931160 vb_active_itm_memory 0 vb_active_num 77 vb_active_num_non_resident 0 vb_active_ops_create 0 vb_active_ops_delete 0 vb_active_ops_reject 0 vb_active_ops_update 0 vb_active_perc_mem_resident 0 vb_active_queue_age 0 vb_active_queue_drain 0 vb_active_queue_fill 0 vb_active_queue_memory 0 vb_active_queue_pending 0 vb_active_queue_size 0 vb_dead_num 0 vb_pending_curr_items 0 vb_pending_eject 0 vb_pending_ht_memory 0 vb_pending_itm_memory 0 vb_pending_num 0 vb_pending_num_non_resident 0 vb_pending_ops_create 0 vb_pending_ops_delete 0 vb_pending_ops_reject 0 vb_pending_ops_update 0 vb_pending_perc_mem_resident 0 vb_pending_queue_age 0 vb_pending_queue_drain 0 vb_pending_queue_fill 0 vb_pending_queue_memory 0 vb_pending_queue_pending 0 vb_pending_queue_size 0 vb_replica_curr_items 0 vb_replica_eject 0 vb_replica_ht_memory 0 vb_replica_itm_memory 0 vb_replica_num 0 vb_replica_num_non_resident 0 vb_replica_ops_create 0 vb_replica_ops_delete 0 vb_replica_ops_reject 0 vb_replica_ops_update 0 vb_replica_perc_mem_resident 0 vb_replica_queue_age 0 vb_replica_queue_drain 0 vb_replica_queue_fill 0 vb_replica_queue_memory 0 vb_replica_queue_pending 0 vb_replica_queue_size 0 version 1.4.4_516_g2f7d183 [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 101 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 102 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 103 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 104 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 105 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 106 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 107 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 108 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 109 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 110 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 111 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 112 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 113 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 114 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 115 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 116 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 117 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 118 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 119 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 120 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 121 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 122 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 123 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 124 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 125 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 126 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 127 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 128 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 129 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 130 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 131 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 132 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 133 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 134 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 135 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 136 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 137 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 138 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 139 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 140 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 141 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 142 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 143 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 144 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 145 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 146 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 147 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 148 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 149 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 150 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 151 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 152 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 153 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 154 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 155 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 156 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 157 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 158 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 159 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 160 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 161 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 162 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 163 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 164 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 165 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 166 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 167 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 168 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 169 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 170 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 171 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 172 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 173 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 174 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 175 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 176 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 177 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 178 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 179 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 180 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 181 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 182 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 183 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 184 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 185 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 186 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 187 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 188 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 189 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 190 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 191 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 192 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 193 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 194 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 195 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 196 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 197 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 198 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 199 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 200 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 201 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 202 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 203 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 204 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 205 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 206 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 207 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 208 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 209 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 210 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 211 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 212 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 213 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 214 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 215 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 216 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 217 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 218 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 219 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 220 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 221 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 222 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 223 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 224 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 225 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 226 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 227 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 228 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 229 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 230 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 231 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 232 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 233 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 234 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 235 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 236 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 237 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 238 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 239 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 240 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 241 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:05] [ns_1@127.0.0.1:<0.468.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "default" on 'ns_1@127.0.0.1' from missing to active. [views:info] [2012-03-26 1:03:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}] [views:info] [2012-03-26 1:03:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48, 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71, 72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94, 95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 
232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.474.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:<0.474.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.475.0> [user:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/0">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/1">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/10">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/100">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/101">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/102">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/103">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/104">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/105">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/106">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/107">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/108">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/109">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/11">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/110">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/111">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/112">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/113">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/114">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/115">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/116">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/117">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/118">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/119">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/12">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/120">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/121">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/122">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/123">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/124">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/125">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/126">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/127">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/128">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/129">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/13">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/130">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/131">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/132">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/133">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/134">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/135">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/136">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/137">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/138">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/139">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/14">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/140">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/141">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/142">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/143">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/144">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/145">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/146">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/147">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/148">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/149">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/15">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/150">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/151">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/152">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/153">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/154">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/155">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/156">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/157">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/158">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/159">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/16">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/160">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/161">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/162">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/163">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/164">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/165">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/166">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/167">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/168">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/169">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/17">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/170">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/171">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/172">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/173">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/174">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/175">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/176">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/177">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/178">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/179">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/18">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/180">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/181">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/182">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/183">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/184">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/185">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/186">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/187">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/188">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/189">>: ok [ns_server:info] [2012-03-26 1:03:16] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/19">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/190">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/191">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/192">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/193">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/194">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/195">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/196">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/197">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/198">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/199">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/2">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/20">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/200">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/201">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/202">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/203">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/204">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/205">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/206">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/207">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/208">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/209">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/21">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/210">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/211">>: ok 
[ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/212">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/213">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/214">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/215">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/216">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/217">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/218">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/219">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/22">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/220">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/221">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/222">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/223">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/224">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/225">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/226">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/227">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/228">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/229">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/23">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/230">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/231">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/232">>: ok 
[ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/233">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/234">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/235">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/236">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/237">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/238">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/239">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/24">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/240">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/241">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/242">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/243">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/244">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/245">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/246">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/247">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/248">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/249">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/25">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/250">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/251">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/252">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/253">>: ok 
[ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/254">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/255">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/26">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/27">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/28">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/29">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/3">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/30">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/31">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/32">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/33">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/34">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/35">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/36">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/37">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/38">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/39">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/4">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/40">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/41">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/42">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/43">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/44">>: ok [ns_server:info] [2012-03-26 
1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/45">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/46">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/47">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/48">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/49">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/5">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/50">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/51">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/52">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/53">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/54">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/55">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/56">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/57">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/58">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/59">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/6">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/60">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/61">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/62">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/63">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/64">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/65">>: ok [ns_server:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/66">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/67">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/68">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/69">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/7">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/70">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/71">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/72">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/73">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/74">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/75">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/76">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/77">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/78">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/79">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/8">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/80">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/81">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/82">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/83">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/84">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/85">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/86">>: ok [ns_server:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/87">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/88">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/89">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/9">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/90">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/91">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/92">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/93">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/94">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/95">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/96">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/97">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/98">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/99">>: ok [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.474.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3482.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.343.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.453.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.453.0>:ns_port_server:log:166] moxi<0.453.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3483.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.3483.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3183.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3189.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 1 times in the past 12.675816 secs (last seen 12.675816 secs ago [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.3496.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3191.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3193.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3500.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3513.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3514.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3515.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3516.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3517.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3519.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3521.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3522.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.3499.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.3523.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 
1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 
1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 146 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in 
<<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 
243 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found, no_db_file} [views:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.3537.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3484.0>:ns_port_server:log:166] moxi<0.3484.0>: 2012-03-26 01:03:17: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.3484.0>: 2012-03-26 01:03:17: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) moxi<0.3484.0>: 2012-03-26 01:03:19: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.3484.0>: "name": "default", moxi<0.3484.0>: 
"nodeLocator": "vbucket", moxi<0.3484.0>: "saslPassword": "", moxi<0.3484.0>: "nodes": [{ moxi<0.3484.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.3484.0>: "replication": 0, moxi<0.3484.0>: "clusterMembership": "active", moxi<0.3484.0>: "status": "warmup", moxi<0.3484.0>: "thisNode": true, moxi<0.3484.0>: "hostname": "127.0.0.1:8091", moxi<0.3484.0>: "clusterCompatibility": 1, moxi<0.3484.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.3484.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.3484.0>: "ports": { moxi<0.3484.0>: "proxy": 11211, moxi<0.3484.0>: "direct": 11210 moxi<0.3484.0>: } moxi<0.3484.0>: }], moxi<0.3484.0>: "vBucketServerMap": { moxi<0.3484.0>: "hashAlgorithm": "CRC", moxi<0.3484.0>: "numReplicas": 1, moxi<0.3484.0>: "serverList": ["127.0.0.1:11210"], moxi<0.3484.0>: "vBucketMap": [] moxi<0.3484.0>: } moxi<0.3484.0>: }) [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: metadata loaded in 118 usec memcached<0.366.0>: warmup completed in 216 usec memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine [stats:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:<0.3517.0>:stats_collector:handle_info:84] Stats for bucket "default": accepting_conns 1 auth_cmds 0 auth_errors 0 bucket_active_conns 1 bucket_conns 2 bytes 177473 bytes_read 67 bytes_written 48 cas_badval 0 cas_hits 0 cas_misses 0 cmd_flush 0 cmd_get 0 cmd_set 0 conn_yields 0 connection_structures 0 curr_connections 12 curr_items 0 curr_items_tot 0 daemon_connections 10 decr_hits 0 decr_misses 0 delete_hits 0 delete_misses 0 ep_allow_data_loss_during_shutdown 1 ep_alog_block_size 4096 ep_alog_path ep_alog_sleep_time 30 ep_backend couchdb ep_bg_fetch_delay 0 ep_bg_fetched 0 ep_cache_size 0 ep_chk_max_items 30000 ep_chk_period 3600 ep_chk_remover_stime 5 ep_commit_num 0 ep_commit_time 0 ep_commit_time_total 0 ep_concurrentDB 1 ep_config_file ep_couch_bucket default ep_couch_default_batch_size 500 ep_couch_host localhost ep_couch_port 11213 ep_couch_reconnect_sleeptime 250 ep_couch_response_timeout 600000 ep_couch_vbucket_batch_count 4 ep_data_age 0 ep_data_age_highwat 0 ep_db_cleaner_status running ep_db_shards 4 ep_db_strategy multiDB ep_dbinit 0 ep_dbname /opt/couchbase/var/lib/couchdb/default ep_degraded_mode 0 ep_diskqueue_drain 0 ep_diskqueue_fill 0 ep_diskqueue_items 0 ep_diskqueue_memory 0 ep_diskqueue_pending 0 ep_exp_pager_stime 3600 ep_exp_pager_stime 3600 ep_expired 0 ep_expiry_window 3 ep_failpartialwarmup 0 ep_flush_all false ep_flush_duration 0 ep_flush_duration_highwat 0 ep_flush_duration_total 0 ep_flush_preempts 0 ep_flusher_state initializing ep_flusher_todo 0 ep_getl_default_timeout 15 ep_getl_max_timeout 30 ep_ht_locks 5 ep_ht_size 3079 ep_inconsistent_slave_chk 0 ep_initfile ep_io_num_read 0 ep_io_num_write 0 ep_io_read_bytes 0 ep_io_write_bytes 0 ep_item_begin_failed 0 ep_item_commit_failed 0 ep_item_flush_expired 0 ep_item_flush_failed 0 ep_item_num_based_new_chk 1 ep_items_rm_from_checkpoints 0 ep_keep_closed_chks 0 ep_klog_block_size 4096 ep_klog_compactor_queue_cap 500000 ep_klog_compactor_stime 3600 ep_klog_flush commit2 ep_klog_max_entry_ratio 10 ep_klog_max_log_size 
2147483647 ep_klog_path ep_klog_sync commit2 ep_kv_size 0 ep_latency_arith_cmd 0 ep_latency_get_cmd 0 ep_max_checkpoints 2 ep_max_data_size 209715200 ep_max_item_size 20971520 ep_max_size 209715200 ep_max_txn_size 10000 ep_max_vbuckets 256 ep_mem_high_wat 157286400 ep_mem_high_wat 18446744073709551615 ep_mem_low_wat 125829120 ep_mem_low_wat 18446744073709551615 ep_min_data_age 0 ep_mlog_compactor_runs 0 ep_mutation_mem_threshold 0 ep_num_active_non_resident 0 ep_num_checkpoint_remover_runs 0 ep_num_eject_failures 0 ep_num_eject_replicas 0 ep_num_expiry_pager_runs 0 ep_num_non_resident 0 ep_num_not_my_vbuckets 0 ep_num_pager_runs 0 ep_num_value_ejects 0 ep_obs_reg_clean_job 0 ep_observe_calls 0 ep_observe_errors 0 ep_observe_registry_size 0 ep_oom_errors 0 ep_overhead 1312 ep_pending_ops 0 ep_pending_ops_max 0 ep_pending_ops_max_duration 0 ep_pending_ops_total 0 ep_postInitfile ep_queue_age_cap 900 ep_queue_size 0 ep_restore_file_checks 1 ep_restore_mode 0 ep_shardpattern %d/%b-%i.sqlite ep_stats_observe_polls 0 ep_storage_age 0 ep_storage_age_highwat 0 ep_storage_type featured ep_store_max_concurrency 10 ep_store_max_readers 9 ep_store_max_readwrite 1 ep_stored_val_type ep_tap_ack_grace_period 300 ep_tap_ack_initial_sequence_number 1 ep_tap_ack_interval 1000 ep_tap_ack_window_size 10 ep_tap_backfill_resident 0.9 ep_tap_backlog_limit 5000 ep_tap_backoff_period 5 ep_tap_bg_fetch_requeued 0 ep_tap_bg_fetched 0 ep_tap_bg_max_pending 500 ep_tap_conn_map_notifications 0 ep_tap_keepalive 300 ep_tap_noop_interval 20 ep_tap_requeue_sleep_time 0.1 ep_tap_throttle_queue_cap 1000000 ep_tap_throttle_threshold 90 ep_tmp_item_expiry_window 3600 ep_tmp_oom_errors 0 ep_too_old 0 ep_too_young 0 ep_total_cache_size 0 ep_total_del_items 0 ep_total_enqueued 0 ep_total_new_items 0 ep_total_observe_sets 0 ep_total_persisted 0 ep_uncommitted_items 0 ep_unobserve_calls 0 ep_value_size 0 ep_vb0 0 ep_vb_chunk_del_time 500 ep_vb_del_chunk_size 100 ep_vb_total 0 ep_vbucket_del 0 ep_vbucket_del_fail 0 ep_version 1.8.1r_467_g931cd34 ep_waitforwarmup 0 ep_warmup 1 ep_warmup_batch_size 1000 ep_warmup_min_items_threshold 10 ep_warmup_min_memory_threshold 10 get_hits 0 get_misses 0 incr_hits 0 incr_misses 0 libevent 2.0.11-stable limit_maxbytes 67108864 listen_disabled_num 0 mem_used 177473 pid 2631 pointer_size 64 rejected_conns 0 rusage_system 0.317951 rusage_user 1.454778 threads 4 time 1332748996 total_connections 17 uptime 19 vb_active_curr_items 0 vb_active_eject 0 vb_active_ht_memory 0 vb_active_itm_memory 0 vb_active_num 0 vb_active_num_non_resident 0 vb_active_ops_create 0 vb_active_ops_delete 0 vb_active_ops_reject 0 vb_active_ops_update 0 vb_active_perc_mem_resident 0 vb_active_queue_age 0 vb_active_queue_drain 0 vb_active_queue_fill 0 vb_active_queue_memory 0 vb_active_queue_pending 0 vb_active_queue_size 0 vb_dead_num 0 vb_pending_curr_items 0 vb_pending_eject 0 vb_pending_ht_memory 0 vb_pending_itm_memory 0 vb_pending_num 0 vb_pending_num_non_resident 0 vb_pending_ops_create 0 vb_pending_ops_delete 0 vb_pending_ops_reject 0 vb_pending_ops_update 0 vb_pending_perc_mem_resident 0 vb_pending_queue_age 0 vb_pending_queue_drain 0 vb_pending_queue_fill 0 vb_pending_queue_memory 0 vb_pending_queue_pending 0 vb_pending_queue_size 0 vb_replica_curr_items 0 vb_replica_eject 0 vb_replica_ht_memory 0 vb_replica_itm_memory 0 vb_replica_num 0 vb_replica_num_non_resident 0 vb_replica_ops_create 0 vb_replica_ops_delete 0 vb_replica_ops_reject 0 vb_replica_ops_update 0 vb_replica_perc_mem_resident 0 vb_replica_queue_age 0 
vb_replica_queue_drain 0 vb_replica_queue_fill 0 vb_replica_queue_memory 0 vb_replica_queue_pending 0 vb_replica_queue_size 0 version 1.4.4_516_g2f7d183 [user:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:handle_info:310] Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds. [ns_server:info] [2012-03-26 1:03:17] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:1("Bucket \"default\" loaded on node 'ns_1@127.0.0.1' in 0 seconds.") because it's been seen 1 times in the past 12.568201 secs (last seen 12.568201 secs ago [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:207] The following buckets became ready on node 'ns_1@127.0.0.1': ["default"] [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3484.0>:ns_port_server:log:166] moxi<0.3484.0>: 2012-03-26 01:03:19: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.3484.0>: "name": "default", moxi<0.3484.0>: "nodeLocator": "vbucket", moxi<0.3484.0>: "saslPassword": "", moxi<0.3484.0>: "nodes": [{ moxi<0.3484.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.3484.0>: "replication": 0, moxi<0.3484.0>: "clusterMembership": "active", moxi<0.3484.0>: "status": "healthy", moxi<0.3484.0>: "thisNode": true, moxi<0.3484.0>: "hostname": "127.0.0.1:8091", moxi<0.3484.0>: "clusterCompatibility": 1, moxi<0.3484.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.3484.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.3484.0>: "ports": { moxi<0.3484.0>: "proxy": 11211, moxi<0.3484.0>: "direct": 11210 moxi<0.3484.0>: } moxi<0.3484.0>: }], moxi<0.3484.0>: "vBucketServerMap": { moxi<0.3484.0>: "hashAlgorithm": "CRC", moxi<0.3484.0>: "numReplicas": 1, moxi<0.3484.0>: "serverList": ["127.0.0.1:11210"], moxi<0.3484.0>: "vBucketMap": [] moxi<0.3484.0>: } moxi<0.3484.0>: }) [views:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]}, {passive,[]}, {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {replica,[]}] [views:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}, {map,[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...]}]}]}] [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 6 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 7 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 8 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 9 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 10 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 11 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 12 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 13 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 14 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 15 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 16 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 17 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 18 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 19 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 20 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 21 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 22 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 23 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 24 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: vbucket_map_history -> [{[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...], [{max_slaves,10}]}] [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 25 in "default" on 'ns_1@127.0.0.1' from 
missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 26 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 27 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 28 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 29 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 30 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 31 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 32 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 33 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 34 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 35 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 36 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 37 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 38 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 39 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 40 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 41 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 42 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 43 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 44 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 45 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 46 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 47 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 48 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 49 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 50 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 51 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 52 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 53 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 54 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 55 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 56 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 57 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 58 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 59 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 60 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 61 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 62 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 63 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 64 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 65 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 66 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 67 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 68 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 69 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 70 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 71 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 72 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 73 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 74 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 75 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 76 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 77 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 78 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 79 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 80 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 81 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 82 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 83 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 84 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 85 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 86 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 87 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 88 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 89 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 90 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 91 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 92 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 93 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 94 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 95 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 96 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 97 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 98 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 99 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 100 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 101 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 102 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 103 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 104 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 105 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 106 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 107 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 108 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 109 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 110 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 111 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 112 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 113 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 114 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 115 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 116 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 117 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 118 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 119 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 120 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 121 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 122 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 123 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 124 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 125 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 126 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 127 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 128 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 129 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 130 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 131 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 132 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 133 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 134 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 135 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 136 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 137 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 138 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 139 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 140 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 141 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 142 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 143 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 144 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 145 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 146 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 147 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 148 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 149 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 150 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 151 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 152 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 153 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 154 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 155 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 156 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 157 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 158 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 159 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 160 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 161 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 162 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 163 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 164 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 165 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 166 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 167 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 168 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 169 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 170 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 171 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 172 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 173 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 174 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 175 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 176 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 177 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 178 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 179 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 180 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 181 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 182 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 183 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 184 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 185 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 186 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 187 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 188 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 189 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 190 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 191 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 192 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 193 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 194 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 195 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 196 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 197 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 198 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 199 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 200 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 201 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 202 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 203 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 204 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 205 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 206 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 207 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 208 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 209 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 210 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 211 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 212 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 213 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 214 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 215 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 216 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 217 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 218 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 219 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 220 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 221 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 222 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 223 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 224 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 225 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 226 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 227 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 228 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 229 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 230 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 231 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 232 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 233 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 234 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 235 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 236 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 237 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 238 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 239 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 240 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 241 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:18] [ns_1@127.0.0.1:<0.3491.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[views:info] [2012-03-26 1:03:22] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}] [views:info] [2012-03-26 1:03:22] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48, 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71, 72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94, 95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.3496.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:<0.3496.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.3497.0> [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [user:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"default\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 1 times in the past 12.587827 secs (last seen 12.587827 secs ago [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] 
ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:28] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/0">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/1">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/10">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/100">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/101">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/102">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/103">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/104">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/105">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/106">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/107">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/108">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/109">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/11">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/110">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/111">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/112">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/113">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/114">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/115">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/116">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/117">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/118">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/119">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/12">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/120">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/121">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/122">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/123">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/124">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/125">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/126">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/127">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/128">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/129">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/13">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/130">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/131">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/132">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/133">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/134">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/135">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/136">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/137">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/138">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/139">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/14">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/140">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/141">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/142">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/143">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/144">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/145">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/146">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/147">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/148">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/149">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/15">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/150">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/151">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/152">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/153">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/154">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/155">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/156">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/157">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/158">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/159">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/16">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/160">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/161">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/162">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/163">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/164">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/165">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/166">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/167">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/168">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/169">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/17">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/170">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/171">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/172">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/173">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/174">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/175">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/176">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/177">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/178">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/179">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/18">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/180">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/181">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/182">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/183">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/184">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/185">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/186">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/187">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/188">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/189">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/19">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/190">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/191">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/192">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/193">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/194">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/195">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/196">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/197">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! 
[ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/198">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/199">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/2">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/20">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/200">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/201">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/202">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/203">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/204">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/205">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/206">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/207">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/208">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/209">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/21">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/210">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/211">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/212">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/213">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/214">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/215">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/216">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/217">>: ok [ns_server:info] 
[2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/218">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/219">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/22">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/220">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/221">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/222">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/223">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/224">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/225">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/226">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/227">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/228">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/229">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/23">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/230">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/231">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/232">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/233">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/234">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/235">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/236">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/237">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/238">>: ok [ns_server:info] [2012-03-26 
1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/239">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/24">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/240">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/241">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/242">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/243">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/244">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/245">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/246">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/247">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/248">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/249">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/25">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/250">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/251">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/252">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/253">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/254">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/255">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/26">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/27">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/28">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/29">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/3">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/30">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/31">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/32">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/33">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/34">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/35">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/36">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/37">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/38">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/39">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/4">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/40">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/41">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/42">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/43">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/44">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/45">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/46">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/47">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/48">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/49">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/5">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/50">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/51">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/52">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/53">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/54">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/55">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/56">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/57">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/58">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/59">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/6">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/60">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/61">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/62">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/63">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/64">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/65">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/66">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/67">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/68">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/69">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/7">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/70">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/71">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/72">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/73">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/74">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/75">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/76">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/77">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/78">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/79">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/8">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/80">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/81">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/82">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/83">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/84">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/85">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/86">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/87">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/88">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/89">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/9">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/90">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/91">>: ok [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/92">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/93">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/94">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/95">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/96">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/97">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/98">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/99">>: ok [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.3496.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.6448.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.3187.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.3484.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.3484.0>:ns_port_server:log:166] moxi<0.3484.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.6450.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.6450.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.3213.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 1 times in the past 12.451759 secs (last seen 12.451759 secs ago [menelaus:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.6157.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" of type: membase [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [], 13210, 8091} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:wait_for_memcached:278] Waiting for "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13210}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.6464.0>}, {name, {"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [],13210,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", 
"url=http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", "-Z", "port_listen=13210,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}, {single_bucket_sup, start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.6468.0>}, {name, {per_bucket_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {single_bucket_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13210}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [stats:error] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.6159.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_memcached:ensure_bucket:700] Created bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" with config string 
"ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6471.0>}, {name, {ns_memcached,stats, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_memcached,start_link, [{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6482.0>}, {name, {ns_memcached,data, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_memcached,start_link, [{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6484.0>}, {name, {ns_vbm_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_vbm_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6485.0>}, {name, {ns_vbm_new_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_vbm_new_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6486.0>}, {name, {couch_stats_reader, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {couch_stats_reader,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6487.0>}, {name, {stats_collector, 
"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {stats_collector,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6489.0>}, {name, {stats_archiver, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {stats_archiver,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-minute' [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6491.0>}, {name, {stats_reader, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {stats_reader,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6494.0>}, {name, {failover_safeness_level, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {failover_safeness_level,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6470.0>}, {name, {ns_memcached_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_memcached_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [stats:error] [2012-03-26 1:03:29] [ns_1@127.0.0.1:<0.6163.0>:stats_reader:log_bad_responses:185] Bad replies: [{'ns_1@127.0.0.1', {error, {exit, {aborted, {no_exists, ['stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-minute']}}}}}] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6495.0>}, {name, {capi_ddoc_replication_srv, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {capi_ddoc_replication_srv,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-minute', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,841187},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,141,<0.6493.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-minute', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,841187},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,141,<0.6493.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-hour' [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 0 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 1 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 2 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 3 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 4 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 5 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 6 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 7 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 8 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 9 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 10 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 11 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 12 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 13 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 14 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 15 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 16 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 17 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-hour', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, 
{attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,856000},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,142,<0.6524.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-hour', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,856000},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,142,<0.6524.0>}} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 18 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-day' [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 19 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 20 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 21 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 22 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 23 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 24 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 25 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 26 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 27 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 28 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 29 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 30 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 31 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 32 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 33 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 34 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-day', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,863859},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,143,<0.6551.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-day', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,863859},'ns_1@127.0.0.1'}}, 
{version,{{2,0},[]}}]}, {tid,143,<0.6551.0>}} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 35 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-week' [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 36 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 37 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 38 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 39 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 40 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 41 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 42 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 43 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 44 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 45 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 46 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 47 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 48 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 49 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 50 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 51 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 52 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 53 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 54 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 55 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 56 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 57 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-week', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, 
{record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,872534},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,144,<0.6574.0>}} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 58 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-week', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,872534},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,144,<0.6574.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-month' [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 59 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 60 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 61 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 62 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 63 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 64 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 65 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 66 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 67 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 68 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 69 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 70 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 71 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 72 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 73 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 74 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 75 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 76 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 77 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 78 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 79 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 80 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 81 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 82 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 83 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 84 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-month', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,879200},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,145,<0.6605.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-month', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,879200},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,145,<0.6605.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-year' [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 85 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 86 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 87 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 88 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 89 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 90 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 91 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 92 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 93 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 94 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 95 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 96 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 97 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 98 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 99 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 100 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 101 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 102 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 103 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 104 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 105 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 106 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 107 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 108 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 109 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 110 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 111 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 112 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 113 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 114 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 115 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 116 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 117 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 118 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 119 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 120 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-year', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,886695},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,146,<0.6638.0>}} [ns_server:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-year', [{name,'stats_archiver-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba-year'}, {type,ordered_set}, {ram_copies,[]}, 
{disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749009,886695},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,146,<0.6638.0>}} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 121 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 122 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 123 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 124 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 125 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 126 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 127 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 128 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 129 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 130 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 131 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 132 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 133 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 134 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 135 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 136 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 137 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 138 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 139 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 140 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 141 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 142 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 143 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 144 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 145 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 146 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 147 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 148 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 149 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 150 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 151 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 152 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 153 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 154 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 155 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 156 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 157 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 158 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 159 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 160 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 161 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 162 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 163 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 164 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 165 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 166 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 167 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 168 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 169 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 170 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 171 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 172 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 173 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 174 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 175 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 176 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 177 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 178 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 179 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 180 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 181 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 182 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 183 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 184 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 185 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 186 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 187 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 188 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 189 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 190 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 191 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 192 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 193 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 194 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 195 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 196 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 197 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 198 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 199 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 200 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 201 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 202 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 203 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 204 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 205 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 206 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 207 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 208 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 209 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 210 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 211 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 212 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 213 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 214 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 215 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 216 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 217 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 218 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 219 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 220 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 221 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 222 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 223 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 224 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 225 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 226 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 227 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 228 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 229 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 230 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 231 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 232 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 233 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 234 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 235 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 236 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 237 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 238 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 239 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 240 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 241 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 242 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 243 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 244 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 245 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 246 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 247 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 248 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 249 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 250 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 251 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 252 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 253 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 254 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 255 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [views:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:445] Applying map to bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:450] Classified vbuckets for recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-03-26 1:03:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,'single_bucket_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.6518.0>}, {name, {capi_set_view_manager, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {capi_set_view_manager,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6451.0>:ns_port_server:log:166] moxi<0.6451.0>: 2012-03-26 01:03:29: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.6451.0>: 2012-03-26 01:03:29: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Warning: Data diretory does not exist, /opt/couchbase/var/lib/couchdb/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba memcached<0.366.0>: metadata loaded in 295 usec memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba memcached<0.366.0>: warmup completed in 442 usec [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6464.0>:ns_port_server:log:166] moxi<0.6464.0>: 2012-03-26 01:03:29: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (57) moxi<0.6464.0>: 2012-03-26 01:03:31: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13210,"replicaIndex":true,"uri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","streamingUri":"/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","flushCacheUri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-re [user:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_memcached:handle_info:310] Bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" loaded on node 'ns_1@127.0.0.1' in 0 seconds. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:207] The following buckets became ready on node 'ns_1@127.0.0.1': ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"] [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6464.0>:ns_port_server:log:166] moxi<0.6464.0>: 2012-03-26 01:03:32: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13210,"replicaIndex":true,"uri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","streamingUri":"/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","flushCacheUri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","replication":0.0,"clusterMembership":"active","status":"healthy","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-r [stats:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6487.0>:stats_collector:handle_info:84] Stats for bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba": accepting_conns 1 auth_cmds 0 auth_errors 0 bucket_active_conns 1 bucket_conns 2 bytes 118363 bytes_read 54 bytes_written 641 cas_badval 0 cas_hits 0 cas_misses 0 cmd_flush 0 cmd_get 0 cmd_set 0 conn_yields 0 connection_structures 0 curr_connections 12 curr_items 0 curr_items_tot 0 daemon_connections 10 decr_hits 0 decr_misses 0 delete_hits 0 delete_misses 0 ep_allow_data_loss_during_shutdown 1 ep_alog_block_size 4096 ep_alog_path ep_alog_sleep_time 30 ep_backend couchdb ep_bg_fetch_delay 0 ep_bg_fetched 0 ep_cache_size 0 ep_chk_max_items 30000 ep_chk_period 3600 ep_chk_remover_stime 5 ep_commit_num 0 ep_commit_time 0 ep_commit_time_total 0 ep_concurrentDB 1 ep_config_file ep_couch_bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba ep_couch_default_batch_size 500 ep_couch_host localhost ep_couch_port 11213 ep_couch_reconnect_sleeptime 250 ep_couch_response_timeout 600000 ep_couch_vbucket_batch_count 4 ep_data_age 0 ep_data_age_highwat 0 ep_db_cleaner_status running ep_db_shards 4 ep_db_strategy multiDB ep_dbinit 0 ep_dbname /opt/couchbase/var/lib/couchdb/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba ep_degraded_mode 0 ep_diskqueue_drain 0 ep_diskqueue_fill 0 ep_diskqueue_items 0 ep_diskqueue_memory 0 ep_diskqueue_pending 0 ep_exp_pager_stime 3600 ep_exp_pager_stime 3600 ep_expired 0 ep_expiry_window 3 ep_failpartialwarmup 0 ep_flush_all false ep_flush_duration 0 ep_flush_duration_highwat 0 ep_flush_duration_total 0 ep_flush_preempts 0 ep_flusher_state initializing ep_flusher_todo 0 ep_getl_default_timeout 15 ep_getl_max_timeout 30 ep_ht_locks 5 ep_ht_size 3079 ep_inconsistent_slave_chk 0 ep_initfile ep_io_num_read 0 ep_io_num_write 0 ep_io_read_bytes 0 ep_io_write_bytes 0 ep_item_begin_failed 0 ep_item_commit_failed 0 ep_item_flush_expired 0 ep_item_flush_failed 0 ep_item_num_based_new_chk 1 ep_items_rm_from_checkpoints 0 ep_keep_closed_chks 0 ep_klog_block_size 4096 ep_klog_compactor_queue_cap 500000 ep_klog_compactor_stime 3600 ep_klog_flush commit2 ep_klog_max_entry_ratio 10 ep_klog_max_log_size 
2147483647 ep_klog_path ep_klog_sync commit2 ep_kv_size 0 ep_latency_arith_cmd 0 ep_latency_get_cmd 0 ep_max_checkpoints 2 ep_max_data_size 209715200 ep_max_item_size 20971520 ep_max_size 209715200 ep_max_txn_size 10000 ep_max_vbuckets 256 ep_mem_high_wat 157286400 ep_mem_high_wat 18446744073709551615 ep_mem_low_wat 125829120 ep_mem_low_wat 18446744073709551615 ep_min_data_age 0 ep_mlog_compactor_runs 0 ep_mutation_mem_threshold 0 ep_num_active_non_resident 0 ep_num_checkpoint_remover_runs 0 ep_num_eject_failures 0 ep_num_eject_replicas 0 ep_num_expiry_pager_runs 0 ep_num_non_resident 0 ep_num_not_my_vbuckets 0 ep_num_pager_runs 0 ep_num_value_ejects 0 ep_obs_reg_clean_job 0 ep_observe_calls 0 ep_observe_errors 0 ep_observe_registry_size 0 ep_oom_errors 0 ep_overhead 1312 ep_pending_ops 0 ep_pending_ops_max 0 ep_pending_ops_max_duration 0 ep_pending_ops_total 0 ep_postInitfile ep_queue_age_cap 900 ep_queue_size 0 ep_restore_file_checks 1 ep_restore_mode 0 ep_shardpattern %d/%b-%i.sqlite ep_stats_observe_polls 0 ep_storage_age 0 ep_storage_age_highwat 0 ep_storage_type featured ep_store_max_concurrency 10 ep_store_max_readers 9 ep_store_max_readwrite 1 ep_stored_val_type ep_tap_ack_grace_period 300 ep_tap_ack_initial_sequence_number 1 ep_tap_ack_interval 1000 ep_tap_ack_window_size 10 ep_tap_backfill_resident 0.9 ep_tap_backlog_limit 5000 ep_tap_backoff_period 5 ep_tap_bg_fetch_requeued 0 ep_tap_bg_fetched 0 ep_tap_bg_max_pending 500 ep_tap_conn_map_notifications 0 ep_tap_keepalive 300 ep_tap_noop_interval 20 ep_tap_requeue_sleep_time 0.1 ep_tap_throttle_queue_cap 1000000 ep_tap_throttle_threshold 90 ep_tmp_item_expiry_window 3600 ep_tmp_oom_errors 0 ep_too_old 0 ep_too_young 0 ep_total_cache_size 0 ep_total_del_items 0 ep_total_enqueued 0 ep_total_new_items 0 ep_total_observe_sets 0 ep_total_persisted 0 ep_uncommitted_items 0 ep_unobserve_calls 0 ep_value_size 0 ep_vb0 0 ep_vb_chunk_del_time 500 ep_vb_del_chunk_size 100 ep_vb_total 0 ep_vbucket_del 0 ep_vbucket_del_fail 0 ep_version 1.8.1r_467_g931cd34 ep_waitforwarmup 0 ep_warmup 1 ep_warmup_batch_size 1000 ep_warmup_min_items_threshold 10 ep_warmup_min_memory_threshold 10 get_hits 0 get_misses 0 incr_hits 0 incr_misses 0 libevent 2.0.11-stable limit_maxbytes 67108864 listen_disabled_num 0 mem_used 118363 pid 2631 pointer_size 64 rejected_conns 0 rusage_system 0.522920 rusage_user 3.017541 threads 4 time 1332749009 total_connections 21 uptime 32 vb_active_curr_items 0 vb_active_eject 0 vb_active_ht_memory 0 vb_active_itm_memory 0 vb_active_num 0 vb_active_num_non_resident 0 vb_active_ops_create 0 vb_active_ops_delete 0 vb_active_ops_reject 0 vb_active_ops_update 0 vb_active_perc_mem_resident 0 vb_active_queue_age 0 vb_active_queue_drain 0 vb_active_queue_fill 0 vb_active_queue_memory 0 vb_active_queue_pending 0 vb_active_queue_size 0 vb_dead_num 0 vb_pending_curr_items 0 vb_pending_eject 0 vb_pending_ht_memory 0 vb_pending_itm_memory 0 vb_pending_num 0 vb_pending_num_non_resident 0 vb_pending_ops_create 0 vb_pending_ops_delete 0 vb_pending_ops_reject 0 vb_pending_ops_update 0 vb_pending_perc_mem_resident 0 vb_pending_queue_age 0 vb_pending_queue_drain 0 vb_pending_queue_fill 0 vb_pending_queue_memory 0 vb_pending_queue_pending 0 vb_pending_queue_size 0 vb_replica_curr_items 0 vb_replica_eject 0 vb_replica_ht_memory 0 vb_replica_itm_memory 0 vb_replica_num 0 vb_replica_num_non_resident 0 vb_replica_ops_create 0 vb_replica_ops_delete 0 vb_replica_ops_reject 0 vb_replica_ops_update 0 vb_replica_perc_mem_resident 0 vb_replica_queue_age 0 
vb_replica_queue_drain 0 vb_replica_queue_fill 0 vb_replica_queue_memory 0 vb_replica_queue_pending 0 vb_replica_queue_size 0 version 1.4.4_516_g2f7d183 [views:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:445] Applying map to bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: [{active,[]}, {passive,[]}, {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {replica,[]}] [views:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:450] Classified vbuckets for recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13210}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}, {map,[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...]}]}]}] [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 6 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 7 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 8 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 9 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: vbucket_map_history -> [{[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], 
['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...], [{max_slaves,10}]}] [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 10 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 11 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 12 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 13 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 14 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 15 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 16 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 17 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 18 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 19 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 20 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 21 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 22 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 23 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 24 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 25 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 26 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 27 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 28 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 29 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 30 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 31 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 32 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 33 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 34 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 35 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 36 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 37 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 38 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 39 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 40 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 41 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 42 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 43 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 44 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 45 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 46 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 47 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 48 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 49 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 50 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 51 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 52 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 53 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 54 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 55 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 56 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 57 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 58 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 59 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 60 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 61 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 62 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 63 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 64 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 65 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 66 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 67 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 68 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 69 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 70 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 71 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 72 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 73 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 74 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 75 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 76 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 77 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 78 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 79 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 80 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 81 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 82 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 83 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 84 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 85 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 86 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 87 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 88 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 89 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 90 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 91 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 92 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 93 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 94 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 95 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 96 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 97 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 98 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 99 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 100 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 101 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 102 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 103 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 104 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 105 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 106 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 107 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 108 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 109 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 110 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 111 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 112 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 113 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 114 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 115 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 116 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 117 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 118 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 119 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 120 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 121 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 122 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 123 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 124 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 125 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 126 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 127 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 128 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 129 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 130 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 131 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 132 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 133 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 134 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 135 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 136 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 137 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 138 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 139 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 140 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 141 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 142 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 143 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 144 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 145 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 146 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 147 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 148 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 149 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 150 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 151 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 152 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 153 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 154 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 155 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 156 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 157 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 158 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 159 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 160 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 161 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 162 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 163 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 164 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 165 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 166 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 167 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 168 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 169 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 170 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 171 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 172 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 173 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 174 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 175 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 176 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 177 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 178 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 179 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 180 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 181 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 182 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 183 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 184 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 185 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 186 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 187 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 188 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 189 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 190 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 191 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 192 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 193 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 194 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 195 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 196 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 197 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 198 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 199 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 200 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 201 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 202 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 203 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 204 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 205 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 206 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 207 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 208 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 209 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 210 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 211 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 212 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 213 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 214 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 215 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 216 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 217 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 218 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 219 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 220 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 221 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 222 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 223 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 224 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 225 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 226 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 227 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 228 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 229 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 230 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 231 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 232 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 233 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 234 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 235 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 236 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 237 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 238 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 239 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 240 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 241 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:30] [ns_1@127.0.0.1:<0.6463.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[views:info] [2012-03-26 1:03:34] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:445] Applying map to bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}] [views:info] [2012-03-26 1:03:34] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:450] Classified vbuckets for recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48, 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71, 72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94, 95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}, <0.6468.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:<0.6468.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.6469.0> [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [user:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_memcached:terminate:348] Shutting down bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:<0.6161.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing 
unwanted moxi: {"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [], 13210, 8091} [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:<0.6464.0>:ns_port_server:log:166] moxi<0.6464.0>: 2012-03-26 01:03:43: (agent_config.c.1234) ERROR: invalid, empty config from REST server http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/master">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/0">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/1">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/10">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/100">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/101">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/102">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/103">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/104">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/105">>: ok [ns_server:info] [2012-03-26 1:03:41] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/106">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/107">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/108">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/109">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/11">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/110">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/111">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/112">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/113">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/114">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/115">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/116">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/117">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/118">>: ok [ns_server:info] [2012-03-26 1:03:41] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/119">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/12">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/120">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/121">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/122">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/123">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/124">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/125">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/126">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/127">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/128">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/129">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/13">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/130">>: ok [ns_server:info] [2012-03-26 1:03:41] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/131">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/132">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/133">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/134">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/135">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/136">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/137">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/138">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/139">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/14">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/140">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/141">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/142">>: ok [ns_server:info] [2012-03-26 1:03:41] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/143">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/144">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/145">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/146">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/147">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/148">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/149">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/15">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/150">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/151">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/152">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/153">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/154">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/155">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/156">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/157">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/158">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/159">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/16">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/160">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/161">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/162">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/163">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/164">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/165">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/166">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/167">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/168">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/169">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/17">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/170">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/171">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/172">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/173">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/174">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/175">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/176">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/177">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/178">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/179">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/18">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/180">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/181">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/182">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/183">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/184">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/185">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/186">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/187">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/188">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/189">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/19">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/190">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/191">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/192">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/193">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/194">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/195">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/196">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/197">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/198">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/199">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/2">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/20">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/200">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/201">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/202">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/203">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/204">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/205">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/206">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/207">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/208">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/209">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/21">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/210">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/211">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/212">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/213">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/214">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/215">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/216">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/217">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/218">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/219">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/22">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/220">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/221">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/222">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/223">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/224">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/225">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/226">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/227">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/228">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/229">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/23">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/230">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/231">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/232">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/233">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/234">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/235">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/236">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/237">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/238">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/239">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/24">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/240">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/241">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/242">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/243">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/244">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/245">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/246">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/247">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/248">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/249">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/25">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/250">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/251">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/252">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/253">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/254">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/255">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/26">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/27">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/28">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/29">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/3">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/30">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/31">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/32">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/33">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/34">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/35">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/36">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/37">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/38">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/39">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/4">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/40">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/41">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/42">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/43">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/44">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/45">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/46">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/47">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/48">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/49">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/5">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/50">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/51">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/52">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/53">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/54">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/55">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/56">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/57">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/58">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/59">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/6">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/60">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/61">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/62">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/63">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/64">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/65">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/66">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/67">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/68">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/69">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/7">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/70">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/71">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/72">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/73">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/74">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/75">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/76">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/77">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/78">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/79">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/8">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/80">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/81">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/82">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/83">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/84">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/85">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/86">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/87">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/88">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/89">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/9">>: ok [ns_server:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/90">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/91">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/92">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/93">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/94">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/95">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/96">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/97">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/98">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/99">>: ok [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.6468.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" died with reason shutdown [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9474.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, 
stream]} [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.3824.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.6451.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.6451.0>:ns_port_server:log:166] moxi<0.6451.0>: EOL on stdin. Exiting [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9475.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.9475.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9172.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" [menelaus:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9178.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" of type: membase [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba\" of type: membase\n") because it's been seen 1 times in the past 12.613043 secs (last seen 12.613043 secs ago [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [], 13210, 8091} [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:wait_for_memcached:278] Waiting for "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13210}, {type,membase}, 
{num_vbuckets,256}, {servers,[]}]}]}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.9485.0>}, {name, {"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [],13210,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", "url=http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", "-Z", "port_listen=13210,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}, {single_bucket_sup, start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.9488.0>}, {name, {per_bucket_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {single_bucket_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13210}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [stats:error] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9180.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} 
started: [{pid,<0.9491.0>}, {name, {ns_memcached,stats, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_memcached,start_link, [{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_memcached:ensure_bucket:700] Created bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9503.0>}, {name, {ns_memcached,data, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_memcached,start_link, [{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9505.0>}, {name, {ns_vbm_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_vbm_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9506.0>}, {name, {ns_vbm_new_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_vbm_new_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9507.0>}, {name, {couch_stats_reader, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {couch_stats_reader,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9508.0>}, {name, {stats_collector, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {stats_collector,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9510.0>}, {name, {stats_archiver, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {stats_archiver,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9512.0>}, {name, {stats_reader, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {stats_reader,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9513.0>}, {name, {failover_safeness_level, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {failover_safeness_level,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9490.0>}, {name, {ns_memcached_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {ns_memcached_sup,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9514.0>}, {name, {capi_ddoc_replication_srv, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {capi_ddoc_replication_srv,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 0 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 1 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 2 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 3 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 4 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 5 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 6 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 7 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 8 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 9 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 10 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 11 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 12 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 13 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 14 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 15 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 16 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 17 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 18 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 19 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 20 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 21 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 22 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 23 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 24 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 25 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 26 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 27 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 28 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 29 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 30 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 31 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 32 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 33 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 34 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 35 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 36 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 37 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 38 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 39 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 40 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 41 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 42 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 43 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 44 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 45 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 46 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 47 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 48 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 49 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 50 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 51 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 52 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 53 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 54 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 55 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 56 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 57 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 58 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 59 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 60 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 61 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 62 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 63 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 64 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 65 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 66 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 67 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 68 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 69 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 70 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 71 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 72 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 73 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 74 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 75 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 76 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 77 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 78 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 79 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 80 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 81 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 82 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 83 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 84 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 85 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 86 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 87 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 88 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 89 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 90 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 91 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 92 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 93 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 94 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 95 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 96 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 97 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 98 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 99 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 100 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 101 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 102 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 103 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 104 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 105 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 106 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 107 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 108 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 109 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 110 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 111 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 112 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 113 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 114 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 115 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 116 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 117 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 118 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 119 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 120 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 121 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 122 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 123 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 124 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 125 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 126 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 127 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 128 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 129 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 130 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 131 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 132 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 133 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 134 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 135 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 136 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 137 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 138 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 139 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 140 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 141 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 142 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 143 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 144 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 145 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 146 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 147 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 148 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 149 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 150 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 151 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 152 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 153 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 154 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 155 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 156 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 157 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 158 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 159 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 160 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 161 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 162 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 163 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 164 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 165 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 166 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 167 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 168 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 169 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 170 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 171 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 172 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 173 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 174 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 175 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 176 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 177 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 178 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 179 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 180 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 181 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 182 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 183 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 184 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 185 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 186 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 187 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 188 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 189 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 190 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 191 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 192 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 193 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 194 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 195 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 196 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 197 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 198 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 199 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 200 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 201 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 202 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 203 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 204 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 205 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 206 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 207 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 208 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 209 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 210 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 211 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 212 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 213 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 214 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 215 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 216 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 217 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 218 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 219 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 220 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 221 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 222 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 223 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 224 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 225 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 226 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 227 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 228 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] 
[ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 229 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 230 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 231 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 232 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 233 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 234 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 235 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 236 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 237 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 238 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 239 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 240 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 241 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 242 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 243 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 244 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 245 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 246 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 247 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 248 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 249 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 250 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 251 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 252 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 253 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 254 in 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':couch_log:error:42] MC daemon: Error opening vb 255 in <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba">>: {not_found, no_db_file} [views:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:445] Applying map to bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:450] Classified vbuckets for recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba'} started: [{pid,<0.9531.0>}, {name, {capi_set_view_manager, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}}, {mfargs, {capi_set_view_manager,start_link, ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9476.0>:ns_port_server:log:166] moxi<0.9476.0>: 2012-03-26 01:03:42: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.9476.0>: 2012-03-26 01:03:42: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9485.0>:ns_port_server:log:166] moxi<0.9485.0>: 2012-03-26 01:03:42: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (57) moxi<0.9485.0>: 2012-03-26 01:03:44: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13210,"replicaIndex":true,"uri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","streamingUri":"/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","flushCacheUri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-re [ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: 
metadata loaded in 177 usec memcached<0.366.0>: warmup completed in 296 usec [stats:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:<0.9508.0>:stats_collector:handle_info:84] Stats for bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba": accepting_conns 1 auth_cmds 0 auth_errors 0 bucket_active_conns 1 bucket_conns 2 bytes 118598 bytes_read 24 bytes_written 48 cas_badval 0 cas_hits 0 cas_misses 0 cmd_flush 0 cmd_get 0 cmd_set 0 conn_yields 0 connection_structures 0 curr_connections 12 curr_items 0 curr_items_tot 0 daemon_connections 10 decr_hits 0 decr_misses 0 delete_hits 0 delete_misses 0 ep_allow_data_loss_during_shutdown 1 ep_alog_block_size 4096 ep_alog_path ep_alog_sleep_time 30 ep_backend couchdb ep_bg_fetch_delay 0 ep_bg_fetched 0 ep_cache_size 0 ep_chk_max_items 30000 ep_chk_period 3600 ep_chk_remover_stime 5 ep_commit_num 0 ep_commit_time 0 ep_commit_time_total 0 ep_concurrentDB 1 ep_config_file ep_couch_bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba ep_couch_default_batch_size 500 ep_couch_host localhost ep_couch_port 11213 ep_couch_reconnect_sleeptime 250 ep_couch_response_timeout 600000 ep_couch_vbucket_batch_count 4 ep_data_age 0 ep_data_age_highwat 0 ep_db_cleaner_status running ep_db_shards 4 ep_db_strategy multiDB ep_dbinit 0 ep_dbname /opt/couchbase/var/lib/couchdb/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba ep_degraded_mode 0 ep_diskqueue_drain 0 ep_diskqueue_fill 0 ep_diskqueue_items 0 ep_diskqueue_memory 0 ep_diskqueue_pending 0 ep_exp_pager_stime 3600 ep_exp_pager_stime 3600 ep_expired 0 ep_expiry_window 3 ep_failpartialwarmup 0 ep_flush_all false ep_flush_duration 0 ep_flush_duration_highwat 0 ep_flush_duration_total 0 ep_flush_preempts 0 ep_flusher_state initializing ep_flusher_todo 0 ep_getl_default_timeout 15 ep_getl_max_timeout 30 ep_ht_locks 5 ep_ht_size 3079 ep_inconsistent_slave_chk 0 ep_initfile ep_io_num_read 0 ep_io_num_write 0 ep_io_read_bytes 0 ep_io_write_bytes 0 ep_item_begin_failed 0 ep_item_commit_failed 0 ep_item_flush_expired 0 ep_item_flush_failed 0 ep_item_num_based_new_chk 1 ep_items_rm_from_checkpoints 0 ep_keep_closed_chks 0 ep_klog_block_size 4096 ep_klog_compactor_queue_cap 500000 ep_klog_compactor_stime 3600 ep_klog_flush commit2 ep_klog_max_entry_ratio 10 ep_klog_max_log_size 2147483647 ep_klog_path ep_klog_sync commit2 ep_kv_size 0 ep_latency_arith_cmd 0 ep_latency_get_cmd 0 ep_max_checkpoints 2 ep_max_data_size 209715200 ep_max_item_size 20971520 ep_max_size 209715200 ep_max_txn_size 10000 ep_max_vbuckets 256 ep_mem_high_wat 157286400 ep_mem_high_wat 18446744073709551615 ep_mem_low_wat 125829120 ep_mem_low_wat 18446744073709551615 ep_min_data_age 0 ep_mlog_compactor_runs 0 ep_mutation_mem_threshold 0 ep_num_active_non_resident 0 ep_num_checkpoint_remover_runs 0 ep_num_eject_failures 0 ep_num_eject_replicas 0 ep_num_expiry_pager_runs 0 ep_num_non_resident 0 ep_num_not_my_vbuckets 0 ep_num_pager_runs 0 ep_num_value_ejects 0 ep_obs_reg_clean_job 0 ep_observe_calls 0 ep_observe_errors 0 ep_observe_registry_size 0 ep_oom_errors 0 ep_overhead 1312 ep_pending_ops 0 ep_pending_ops_max 0 ep_pending_ops_max_duration 0 ep_pending_ops_total 0 ep_postInitfile ep_queue_age_cap 900 ep_queue_size 0 ep_restore_file_checks 1 ep_restore_mode 0 ep_shardpattern %d/%b-%i.sqlite ep_stats_observe_polls 0 ep_storage_age 0 ep_storage_age_highwat 0 ep_storage_type featured ep_store_max_concurrency 10 ep_store_max_readers 9 ep_store_max_readwrite 1 ep_stored_val_type ep_tap_ack_grace_period 300 
ep_tap_ack_initial_sequence_number 1 ep_tap_ack_interval 1000 ep_tap_ack_window_size 10 ep_tap_backfill_resident 0.9 ep_tap_backlog_limit 5000 ep_tap_backoff_period 5 ep_tap_bg_fetch_requeued 0 ep_tap_bg_fetched 0 ep_tap_bg_max_pending 500 ep_tap_conn_map_notifications 0 ep_tap_keepalive 300 ep_tap_noop_interval 20 ep_tap_requeue_sleep_time 0.1 ep_tap_throttle_queue_cap 1000000 ep_tap_throttle_threshold 90 ep_tmp_item_expiry_window 3600 ep_tmp_oom_errors 0 ep_too_old 0 ep_too_young 0 ep_total_cache_size 0 ep_total_del_items 0 ep_total_enqueued 0 ep_total_new_items 0 ep_total_observe_sets 0 ep_total_persisted 0 ep_uncommitted_items 0 ep_unobserve_calls 0 ep_value_size 0 ep_vb0 0 ep_vb_chunk_del_time 500 ep_vb_del_chunk_size 100 ep_vb_total 0 ep_vbucket_del 0 ep_vbucket_del_fail 0 ep_version 1.8.1r_467_g931cd34 ep_waitforwarmup 0 ep_warmup 1 ep_warmup_batch_size 1000 ep_warmup_min_items_threshold 10 ep_warmup_min_memory_threshold 10 get_hits 0 get_misses 0 incr_hits 0 incr_misses 0 libevent 2.0.11-stable limit_maxbytes 67108864 listen_disabled_num 0 mem_used 118598 pid 2631 pointer_size 64 rejected_conns 0 rusage_system 0.730888 rusage_user 3.516465 threads 4 time 1332749021 total_connections 26 uptime 44 vb_active_curr_items 0 vb_active_eject 0 vb_active_ht_memory 0 vb_active_itm_memory 0 vb_active_num 0 vb_active_num_non_resident 0 vb_active_ops_create 0 vb_active_ops_delete 0 vb_active_ops_reject 0 vb_active_ops_update 0 vb_active_perc_mem_resident 0 vb_active_queue_age 0 vb_active_queue_drain 0 vb_active_queue_fill 0 vb_active_queue_memory 0 vb_active_queue_pending 0 vb_active_queue_size 0 vb_dead_num 0 vb_pending_curr_items 0 vb_pending_eject 0 vb_pending_ht_memory 0 vb_pending_itm_memory 0 vb_pending_num 0 vb_pending_num_non_resident 0 vb_pending_ops_create 0 vb_pending_ops_delete 0 vb_pending_ops_reject 0 vb_pending_ops_update 0 vb_pending_perc_mem_resident 0 vb_pending_queue_age 0 vb_pending_queue_drain 0 vb_pending_queue_fill 0 vb_pending_queue_memory 0 vb_pending_queue_pending 0 vb_pending_queue_size 0 vb_replica_curr_items 0 vb_replica_eject 0 vb_replica_ht_memory 0 vb_replica_itm_memory 0 vb_replica_num 0 vb_replica_num_non_resident 0 vb_replica_ops_create 0 vb_replica_ops_delete 0 vb_replica_ops_reject 0 vb_replica_ops_update 0 vb_replica_perc_mem_resident 0 vb_replica_queue_age 0 vb_replica_queue_drain 0 vb_replica_queue_fill 0 vb_replica_queue_memory 0 vb_replica_queue_pending 0 vb_replica_queue_size 0 version 1.4.4_516_g2f7d183 [user:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_memcached:handle_info:310] Bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" loaded on node 'ns_1@127.0.0.1' in 0 seconds. 
[ns_server:info] [2012-03-26 1:03:42] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:1("Bucket \"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba\" loaded on node 'ns_1@127.0.0.1' in 0 seconds.") because it's been seen 1 times in the past 12.612942 secs (last seen 12.612942 secs ago [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:207] The following buckets became ready on node 'ns_1@127.0.0.1': ["recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"] [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9485.0>:ns_port_server:log:166] moxi<0.9485.0>: 2012-03-26 01:03:45: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13210,"replicaIndex":true,"uri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","streamingUri":"/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","flushCacheUri":"/pools/default/buckets/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba","replication":0.0,"clusterMembership":"active","status":"healthy","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-r [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[views:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:445] Applying map to bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: [{active,[]}, {passive,[]}, {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {replica,[]}] [views:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:450] Classified vbuckets for recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13210}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}, {map,[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], 
['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...]}]}]}] [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 6 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 7 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 8 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 9 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 10 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 11 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 12 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 13 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
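The buckets config change a few records up spells out the freshly generated vbucket map: with num_vbuckets 256, num_replicas 1 and a single server, every chain is ['ns_1@127.0.0.1',undefined], i.e. an active copy on the only node and no replica assigned yet. A small illustration of that shape (Python; None stands in for Erlang's undefined, purely for illustration):

# Shape of the map in the config change above: one chain per vbucket,
# [active_node, first_replica]; None stands in for Erlang's 'undefined'.
# Node name and counts are taken from the log record itself.
NUM_VBUCKETS = 256
NODE = "ns_1@127.0.0.1"

vbucket_map = [[NODE, None] for _ in range(NUM_VBUCKETS)]

# num_replicas is 1 but there is only one server, so every replica slot stays
# unassigned -- the ['ns_1@127.0.0.1',undefined] pattern repeated 256 times.
assert len(vbucket_map) == NUM_VBUCKETS
assert all(chain == [NODE, None] for chain in vbucket_map)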
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 14 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 15 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 16 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 17 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 18 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 19 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 20 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 21 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 22 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 23 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 24 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 25 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 26 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 27 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 28 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 29 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 30 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 31 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 32 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 33 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 34 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: vbucket_map_history -> [{[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], 
['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...], [{max_slaves,10}]}] [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 35 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 36 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 37 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 38 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 39 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 40 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 41 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 42 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 43 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 44 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 45 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 46 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 47 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 48 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 49 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 50 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 51 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 52 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 53 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 54 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 55 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 56 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 57 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 58 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 59 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 60 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 61 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 62 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 63 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 64 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 65 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 66 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 67 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 68 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 69 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 70 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 71 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 72 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 73 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 74 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 75 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 76 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 77 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 78 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 79 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 80 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 81 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 82 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 83 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 84 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 85 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 86 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 87 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 88 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 89 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 90 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 91 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 92 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 93 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 94 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 95 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 96 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 97 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 98 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 99 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 100 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 101 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 102 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 103 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 104 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 105 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 106 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 107 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 108 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 109 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 110 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 111 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 112 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 113 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 114 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 115 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 116 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 117 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 118 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 119 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 120 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 121 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 122 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 123 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 124 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 125 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 126 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 127 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 128 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 129 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 130 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 131 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 132 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 133 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 134 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 135 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 136 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 137 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 138 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 139 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 140 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 141 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 142 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 143 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 144 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 145 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 146 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 147 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 148 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 149 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 150 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 151 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 152 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 153 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 154 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 155 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 156 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 157 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 158 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 159 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 160 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 161 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 162 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 163 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 164 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 165 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 166 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 167 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 168 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 169 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 170 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 171 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 172 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 173 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 174 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 175 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 176 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 177 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 178 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 179 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 180 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 181 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 182 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 183 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 184 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 185 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 186 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 187 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 188 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 189 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 190 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 191 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 192 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 193 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 194 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 195 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 196 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 197 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 198 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 199 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 200 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 201 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 202 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 203 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 204 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 205 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 206 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 207 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 208 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 209 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 210 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 211 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 212 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 213 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 214 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 215 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 216 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 217 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 218 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 219 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 220 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 221 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 222 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 223 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 224 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 225 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 226 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 227 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 228 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 229 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 230 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 231 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 232 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 233 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 234 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 235 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 236 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 237 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 238 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 239 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 240 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 241 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:43] [ns_1@127.0.0.1:<0.9483.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' from missing to active. 
[views:info] [2012-03-26 1:03:47] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:445] Applying map to bucket recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}] [views:info] [2012-03-26 1:03:47] [ns_1@127.0.0.1:'capi_set_view_manager-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':capi_set_view_manager:apply_map:450] Classified vbuckets for recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48, 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71, 72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94, 95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba"}, <0.9488.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:<0.9488.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.9489.0> [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:<0.9182.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba", [], 13210, 8091} [user:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_memcached:terminate:348] Shutting down bucket 
"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:<0.9485.0>:ns_port_server:log:166] moxi<0.9485.0>: 2012-03-26 01:03:55: (agent_config.c.1234) ERROR: invalid, empty config from REST server http://127.0.0.1:8091/pools/default/bucketsStreaming/recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 1 times in the past 12.684181 secs (last seen 12.684181 secs ago [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:53] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/master">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/0">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/1">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/10">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/100">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/101">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/102">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/103">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/104">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/105">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/106">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/107">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/108">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/109">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/11">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/110">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/111">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/112">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/113">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/114">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/115">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/116">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/117">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/118">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/119">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/12">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/120">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/121">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/122">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/123">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/124">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/125">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/126">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/127">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/128">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/129">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/13">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/130">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/131">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/132">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/133">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/134">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/135">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/136">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/137">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/138">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/139">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/14">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/140">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/141">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/142">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/143">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/144">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/145">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/146">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/147">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/148">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/149">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/15">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/150">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/151">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/152">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/153">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/154">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/155">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/156">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/157">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/158">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/159">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/16">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/160">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/161">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/162">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/163">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/164">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/165">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/166">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/167">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database 
<<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/168">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/169">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/17">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/170">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/171">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/172">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/173">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/174">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/175">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/176">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/177">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/178">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/179">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/18">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/180">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/181">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/182">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/183">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/184">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/185">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/186">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/187">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/188">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/189">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/19">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/190">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/191">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/192">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/193">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/194">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/195">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/196">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/197">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/198">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/199">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/2">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/20">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/200">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/201">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/202">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/203">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/204">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/205">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/206">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/207">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/208">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/209">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/21">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/210">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/211">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/212">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/213">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/214">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/215">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/216">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/217">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/218">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/219">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/22">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/220">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/221">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/222">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/223">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/224">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/225">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/226">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/227">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/228">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/229">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/23">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/230">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/231">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/232">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/233">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/234">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/235">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/236">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/237">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/238">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/239">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/24">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/240">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/241">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/242">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/243">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/244">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/245">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/246">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/247">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/248">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/249">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/25">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/250">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/251">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/252">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/253">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/254">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/255">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/26">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/27">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/28">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/29">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/3">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/30">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/31">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/32">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/33">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/34">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/35">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/36">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/37">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/38">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/39">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/4">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/40">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/41">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/42">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/43">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/44">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/45">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/46">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/47">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/48">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/49">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/5">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/50">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/51">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/52">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/53">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/54">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/55">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/56">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/57">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/58">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/59">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/6">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/60">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/61">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/62">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/63">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/64">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/65">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/66">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/67">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/68">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/69">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/7">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/70">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/71">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/72">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/73">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/74">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/75">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/76">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/77">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/78">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/79">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/8">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/80">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/81">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/82">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/83">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/84">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/85">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/86">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/87">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/88">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/89">>: ok 
[ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/9">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/90">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/91">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/92">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/93">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/94">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/95">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/96">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/97">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/98">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:'ns_memcached-recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba':ns_storage_conf:delete_database:395] Deleting database <<"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba/99">>: ok [ns_server:info] [2012-03-26 1:03:54] [ns_1@127.0.0.1:<0.9488.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" died with reason shutdown [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12445.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", 
"url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.9476.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.9476.0>:ns_port_server:log:166] moxi<0.9476.0>: EOL on stdin. Exiting [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12446.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.9176.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.12446.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.9202.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba" [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"recreate-non-default-4ef4e34f-0a13-46a9-9624-672f21a4a4ba\"\n") because it's been seen 1 times in the past 12.624912 secs (last seen 12.624912 secs ago [menelaus:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12151.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" of type: membase [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:wait_for_memcached:278] Waiting for "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, 
{auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}, {single_bucket_sup, start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.12461.0>}, {name, {per_bucket_sup, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {single_bucket_sup,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [stats:error] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_memcached:ensure_bucket:700] Created bucket "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12464.0>}, {name, {ns_memcached,stats, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", stats}]}}, {restart_type,permanent}, {shutdown,86400000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12477.0>}, {name, {ns_memcached,data, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12478.0>}, {name, {ns_vbm_sup, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {ns_vbm_sup,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12479.0>}, {name, {ns_vbm_new_sup, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {ns_vbm_new_sup,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12480.0>}, {name, {couch_stats_reader, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {couch_stats_reader,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12481.0>}, {name, {stats_collector, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {stats_collector,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12483.0>}, {name, {stats_archiver, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {stats_archiver,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-minute' [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12485.0>}, {name, {stats_reader, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {stats_reader,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12486.0>}, {name, {failover_safeness_level, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {failover_safeness_level,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12463.0>}, {name, {ns_memcached_sup, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {ns_memcached_sup,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [stats:error] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12155.0>:stats_reader:log_bad_responses:185] Bad replies: [{'ns_1@127.0.0.1', {error, {exit, {aborted, {no_exists, ['stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-minute']}}}}}] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-minute', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,103619},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,262,<0.12488.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-minute', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,103619},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,262,<0.12488.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-hour' [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12489.0>}, {name, {capi_ddoc_replication_srv, 
"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {capi_ddoc_replication_srv,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 0 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 1 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 2 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 3 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 4 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 5 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 6 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 7 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 8 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 9 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 10 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 11 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 12 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 13 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 14 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 15 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 16 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 17 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 18 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 19 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 20 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 21 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 22 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 23 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 24 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 25 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 26 in 
<<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 27 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 28 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 29 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 30 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 31 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 32 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 33 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 34 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 35 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 36 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 37 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 38 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 39 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 40 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 41 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-hour', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,129250},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,263,<0.12511.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-hour', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,129250},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,263,<0.12511.0>}} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 42 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-day' [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 43 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 44 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 45 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 46 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 47 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 48 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 49 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 50 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 51 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 52 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 53 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 54 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 55 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 56 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 57 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 58 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 59 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 60 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 61 in 
<<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 62 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 63 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 64 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 65 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 66 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 67 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 68 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-day', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,141996},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,264,<0.12567.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-day', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,141996},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,264,<0.12567.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-week' [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: 
Error opening vb 69 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 70 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 71 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 72 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 73 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 74 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 75 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 76 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 77 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 78 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 79 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 80 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 81 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 82 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 83 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 84 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 85 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 86 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 87 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 88 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-week', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,151287},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,265,<0.12600.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-week', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,151287},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,265,<0.12600.0>}} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 89 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-month' [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 90 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 91 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 92 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 93 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 94 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 95 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 96 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 97 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 98 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 99 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 100 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 101 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 102 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 103 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 104 in 
<<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 105 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 106 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 107 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 108 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 109 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 110 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 111 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 112 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 113 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 114 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-month', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,159444},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,266,<0.12628.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-month', 
[{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,159444},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,266,<0.12628.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-year' [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 115 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 116 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 117 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 118 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 119 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 120 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 121 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 122 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 123 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 124 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 125 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 126 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 127 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 128 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 129 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 130 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 131 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 132 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 133 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 134 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 135 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 136 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 137 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 138 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 139 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 140 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 141 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 142 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 143 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 144 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 145 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 146 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 147 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 148 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 149 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 150 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 151 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 152 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 153 
in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 154 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 155 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 156 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 157 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 158 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 159 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 160 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 161 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 162 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 163 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 164 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 165 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 166 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 167 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 168 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 169 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 170 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 171 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 172 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 173 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 174 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 175 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 176 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 177 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 178 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 179 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 180 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 181 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 182 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 183 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 184 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 185 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-year', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,168039},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,267,<0.12660.0>}} [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-year', [{name,'stats_archiver-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749035,168039},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,267,<0.12660.0>}} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 186 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 187 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 188 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 189 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 190 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 191 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 192 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 193 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 194 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 195 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 196 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 197 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 198 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 199 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 200 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 201 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 202 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 203 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 204 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 205 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 206 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 207 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 208 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 209 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 210 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 211 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 212 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 213 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 214 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 215 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 216 
in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 217 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 218 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 219 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 220 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 221 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 222 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 223 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 224 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 225 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 226 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 227 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 228 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 229 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 230 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 231 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 232 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 233 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 234 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 235 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 236 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 237 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 238 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 239 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 240 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 241 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 242 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 243 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 244 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 245 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 246 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 247 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 248 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 249 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 250 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 251 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 252 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 253 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 254 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [views:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':capi_set_view_manager:apply_map:445] Applying map to bucket new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [couchdb:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':couch_log:error:42] MC daemon: Error opening vb 255 in <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d">>: {not_found, no_db_file} [error_logger:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d'} started: [{pid,<0.12512.0>}, {name, {capi_set_view_manager, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}}, {mfargs, {capi_set_view_manager,start_link, ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [views:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':capi_set_view_manager:apply_map:450] Classified vbuckets for new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12447.0>:ns_port_server:log:166] moxi<0.12447.0>: 2012-03-26 01:03:55: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.12447.0>: 2012-03-26 01:03:55: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) moxi<0.12447.0>: 2012-03-26 01:03:57: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.12447.0>: "name": "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", moxi<0.12447.0>: "nodeLocator": "vbucket", moxi<0.12447.0>: "saslPassword": "password", moxi<0.12447.0>: "nodes": [{ moxi<0.12447.0>: "couchApiBase": "http://127.0.0.1:8092/new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", moxi<0.12447.0>: "replication": 0, moxi<0.12447.0>: "clusterMembership": "active", moxi<0.12447.0>: "status": "warmup", moxi<0.12447.0>: "thisNode": true, moxi<0.12447.0>: "hostname": "127.0.0.1:8091", moxi<0.12447.0>: "clusterCompatibility": 1, moxi<0.12447.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.12447.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.12447.0>: "ports": { moxi<0.12447.0>: "proxy": 11211, moxi<0.12447.0>: "direct": 11210 moxi<0.12447.0>: } moxi<0.12447.0>: }], moxi<0.12447.0>: "vBucketServerMap": { moxi<0.12447.0>: "hashAlgorithm": "CRC", moxi<0.12447.0>: "numReplicas": 1, moxi<0.12447.0>: "serverList": ["127.0.0.1:11210"], moxi<0.12447.0>: "vBucketMap": [] moxi<0.12447.0>: } moxi<0.12447.0>: }) [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Warning: Data diretory does not exist, /opt/couchbase/var/lib/couchdb/new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d memcached<0.366.0>: metadata loaded in 174 usec memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d memcached<0.366.0>: warmup completed in 264 usec [user:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_memcached:handle_info:310] Bucket "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" loaded on node 'ns_1@127.0.0.1' in 0 seconds. 
[stats:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:<0.12481.0>:stats_collector:handle_info:84] Stats for bucket "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d": accepting_conns 1 auth_cmds 0 auth_errors 0 bucket_active_conns 1 bucket_conns 2 bytes 118546 bytes_read 54 bytes_written 641 cas_badval 0 cas_hits 0 cas_misses 0 cmd_flush 0 cmd_get 0 cmd_set 0 conn_yields 0 connection_structures 0 curr_connections 12 curr_items 0 curr_items_tot 0 daemon_connections 10 decr_hits 0 decr_misses 0 delete_hits 0 delete_misses 0 ep_allow_data_loss_during_shutdown 1 ep_alog_block_size 4096 ep_alog_path ep_alog_sleep_time 30 ep_backend couchdb ep_bg_fetch_delay 0 ep_bg_fetched 0 ep_cache_size 0 ep_chk_max_items 30000 ep_chk_period 3600 ep_chk_remover_stime 5 ep_commit_num 0 ep_commit_time 0 ep_commit_time_total 0 ep_concurrentDB 1 ep_config_file ep_couch_bucket new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d ep_couch_default_batch_size 500 ep_couch_host localhost ep_couch_port 11213 ep_couch_reconnect_sleeptime 250 ep_couch_response_timeout 600000 ep_couch_vbucket_batch_count 4 ep_data_age 0 ep_data_age_highwat 0 ep_db_cleaner_status running ep_db_shards 4 ep_db_strategy multiDB ep_dbinit 0 ep_dbname /opt/couchbase/var/lib/couchdb/new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d ep_degraded_mode 0 ep_diskqueue_drain 0 ep_diskqueue_fill 0 ep_diskqueue_items 0 ep_diskqueue_memory 0 ep_diskqueue_pending 0 ep_exp_pager_stime 3600 ep_exp_pager_stime 3600 ep_expired 0 ep_expiry_window 3 ep_failpartialwarmup 0 ep_flush_all false ep_flush_duration 0 ep_flush_duration_highwat 0 ep_flush_duration_total 0 ep_flush_preempts 0 ep_flusher_state initializing ep_flusher_todo 0 ep_getl_default_timeout 15 ep_getl_max_timeout 30 ep_ht_locks 5 ep_ht_size 3079 ep_inconsistent_slave_chk 0 ep_initfile ep_io_num_read 0 ep_io_num_write 0 ep_io_read_bytes 0 ep_io_write_bytes 0 ep_item_begin_failed 0 ep_item_commit_failed 0 ep_item_flush_expired 0 ep_item_flush_failed 0 ep_item_num_based_new_chk 1 ep_items_rm_from_checkpoints 0 ep_keep_closed_chks 0 ep_klog_block_size 4096 ep_klog_compactor_queue_cap 500000 ep_klog_compactor_stime 3600 ep_klog_flush commit2 ep_klog_max_entry_ratio 10 ep_klog_max_log_size 2147483647 ep_klog_path ep_klog_sync commit2 ep_kv_size 0 ep_latency_arith_cmd 0 ep_latency_get_cmd 0 ep_max_checkpoints 2 ep_max_data_size 209715200 ep_max_item_size 20971520 ep_max_size 209715200 ep_max_txn_size 10000 ep_max_vbuckets 256 ep_mem_high_wat 157286400 ep_mem_high_wat 18446744073709551615 ep_mem_low_wat 125829120 ep_mem_low_wat 18446744073709551615 ep_min_data_age 0 ep_mlog_compactor_runs 0 ep_mutation_mem_threshold 0 ep_num_active_non_resident 0 ep_num_checkpoint_remover_runs 0 ep_num_eject_failures 0 ep_num_eject_replicas 0 ep_num_expiry_pager_runs 0 ep_num_non_resident 0 ep_num_not_my_vbuckets 0 ep_num_pager_runs 0 ep_num_value_ejects 0 ep_obs_reg_clean_job 0 ep_observe_calls 0 ep_observe_errors 0 ep_observe_registry_size 0 ep_oom_errors 0 ep_overhead 1312 ep_pending_ops 0 ep_pending_ops_max 0 ep_pending_ops_max_duration 0 ep_pending_ops_total 0 ep_postInitfile ep_queue_age_cap 900 ep_queue_size 0 ep_restore_file_checks 1 ep_restore_mode 0 ep_shardpattern %d/%b-%i.sqlite ep_stats_observe_polls 0 ep_storage_age 0 ep_storage_age_highwat 0 ep_storage_type featured ep_store_max_concurrency 10 ep_store_max_readers 9 ep_store_max_readwrite 1 ep_stored_val_type ep_tap_ack_grace_period 300 ep_tap_ack_initial_sequence_number 1 ep_tap_ack_interval 1000 ep_tap_ack_window_size 10 ep_tap_backfill_resident 0.9 
ep_tap_backlog_limit 5000 ep_tap_backoff_period 5 ep_tap_bg_fetch_requeued 0 ep_tap_bg_fetched 0 ep_tap_bg_max_pending 500 ep_tap_conn_map_notifications 0 ep_tap_keepalive 300 ep_tap_noop_interval 20 ep_tap_requeue_sleep_time 0.1 ep_tap_throttle_queue_cap 1000000 ep_tap_throttle_threshold 90 ep_tmp_item_expiry_window 3600 ep_tmp_oom_errors 0 ep_too_old 0 ep_too_young 0 ep_total_cache_size 0 ep_total_del_items 0 ep_total_enqueued 0 ep_total_new_items 0 ep_total_observe_sets 0 ep_total_persisted 0 ep_uncommitted_items 0 ep_unobserve_calls 0 ep_value_size 0 ep_vb0 0 ep_vb_chunk_del_time 500 ep_vb_del_chunk_size 100 ep_vb_total 0 ep_vbucket_del 0 ep_vbucket_del_fail 0 ep_version 1.8.1r_467_g931cd34 ep_waitforwarmup 0 ep_warmup 1 ep_warmup_batch_size 1000 ep_warmup_min_items_threshold 10 ep_warmup_min_memory_threshold 10 get_hits 0 get_misses 0 incr_hits 0 incr_misses 0 libevent 2.0.11-stable limit_maxbytes 67108864 listen_disabled_num 0 mem_used 118546 pid 2631 pointer_size 64 rejected_conns 0 rusage_system 0.948855 rusage_user 4.911253 threads 4 time 1332749034 total_connections 30 uptime 57 vb_active_curr_items 0 vb_active_eject 0 vb_active_ht_memory 0 vb_active_itm_memory 0 vb_active_num 0 vb_active_num_non_resident 0 vb_active_ops_create 0 vb_active_ops_delete 0 vb_active_ops_reject 0 vb_active_ops_update 0 vb_active_perc_mem_resident 0 vb_active_queue_age 0 vb_active_queue_drain 0 vb_active_queue_fill 0 vb_active_queue_memory 0 vb_active_queue_pending 0 vb_active_queue_size 0 vb_dead_num 0 vb_pending_curr_items 0 vb_pending_eject 0 vb_pending_ht_memory 0 vb_pending_itm_memory 0 vb_pending_num 0 vb_pending_num_non_resident 0 vb_pending_ops_create 0 vb_pending_ops_delete 0 vb_pending_ops_reject 0 vb_pending_ops_update 0 vb_pending_perc_mem_resident 0 vb_pending_queue_age 0 vb_pending_queue_drain 0 vb_pending_queue_fill 0 vb_pending_queue_memory 0 vb_pending_queue_pending 0 vb_pending_queue_size 0 vb_replica_curr_items 0 vb_replica_eject 0 vb_replica_ht_memory 0 vb_replica_itm_memory 0 vb_replica_num 0 vb_replica_num_non_resident 0 vb_replica_ops_create 0 vb_replica_ops_delete 0 vb_replica_ops_reject 0 vb_replica_ops_update 0 vb_replica_perc_mem_resident 0 vb_replica_queue_age 0 vb_replica_queue_drain 0 vb_replica_queue_fill 0 vb_replica_queue_memory 0 vb_replica_queue_pending 0 vb_replica_queue_size 0 version 1.4.4_516_g2f7d183 [ns_server:info] [2012-03-26 1:03:55] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:207] The following buckets became ready on node 'ns_1@127.0.0.1': ["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"] [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12447.0>:ns_port_server:log:166] moxi<0.12447.0>: 2012-03-26 01:03:57: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.12447.0>: "name": "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", moxi<0.12447.0>: "nodeLocator": "vbucket", moxi<0.12447.0>: "saslPassword": "password", moxi<0.12447.0>: "nodes": [{ moxi<0.12447.0>: "couchApiBase": "http://127.0.0.1:8092/new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", moxi<0.12447.0>: "replication": 0, moxi<0.12447.0>: "clusterMembership": "active", moxi<0.12447.0>: "status": "healthy", moxi<0.12447.0>: "thisNode": true, moxi<0.12447.0>: "hostname": "127.0.0.1:8091", moxi<0.12447.0>: "clusterCompatibility": 1, moxi<0.12447.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.12447.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.12447.0>: 
"ports": { moxi<0.12447.0>: "proxy": 11211, moxi<0.12447.0>: "direct": 11210 moxi<0.12447.0>: } moxi<0.12447.0>: }], moxi<0.12447.0>: "vBucketServerMap": { moxi<0.12447.0>: "hashAlgorithm": "CRC", moxi<0.12447.0>: "numReplicas": 1, moxi<0.12447.0>: "serverList": ["127.0.0.1:11210"], moxi<0.12447.0>: "vBucketMap": [] moxi<0.12447.0>: } moxi<0.12447.0>: }) [views:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':capi_set_view_manager:apply_map:445] Applying map to bucket new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d: [{active,[]}, {passive,[]}, {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {replica,[]}] [views:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':capi_set_view_manager:apply_map:450] Classified vbuckets for new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}, {map,[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...]}]}]}] [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 6 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 7 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
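The moxi entry a little earlier in this run ("bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS") rejects the streamed bucket configuration; in the config it printed, "vBucketMap" is empty, i.e. zero entries, which is consistent with a map that had not been generated yet and which cannot satisfy a "power of two > 0" rule. The sketch below only illustrates that count check: it is not moxi/libvbucket code, the real MAX_BUCKETS value does not appear in this log and is treated as a placeholder, and the config literal is abridged from the logged JSON.

```python
# Illustration of the "power of two > 0 and <= MAX_BUCKETS" rule quoted in the
# moxi error above. This is a sketch, not moxi/libvbucket source; MAX_BUCKETS
# is a placeholder default because its real value is not visible in this log.

def valid_bucket_count(n, max_buckets=65536):
    """True iff n is a power of two, greater than zero, and <= max_buckets."""
    return 0 < n <= max_buckets and (n & (n - 1)) == 0

# Fields abridged from the configuration moxi printed in the error entry.
streamed_config = {
    "name": "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d",
    "nodeLocator": "vbucket",
    "vBucketServerMap": {
        "hashAlgorithm": "CRC",
        "numReplicas": 1,
        "serverList": ["127.0.0.1:11210"],
        "vBucketMap": [],  # empty: no vbucket map had been generated yet
    },
}

count = len(streamed_config["vBucketServerMap"]["vBucketMap"])
print(valid_bucket_count(count))  # False -> the streamed config is rejected
```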
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 8 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 9 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 10 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: vbucket_map_history -> [{[['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], 
['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1',undefined], ['ns_1@127.0.0.1'|...], [...]|...], [{max_slaves,10}]}] [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 11 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 12 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 13 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 14 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 15 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 16 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 17 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 18 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 19 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 20 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 21 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 22 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
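The buckets config change and the vbucket_map_history entries above both show the same data shape for the new bucket: 256 chains of the form ['ns_1@127.0.0.1',undefined], meaning the active copy of every vbucket sits on the single node while the replica slot (num_replicas is 1) is still unfilled. A minimal sketch of that structure follows, purely to illustrate the logged shape; it is not ns_server's map-generation algorithm, and Python's None stands in for the Erlang atom undefined.

```python
# Rebuild the shape of the logged vbucket map: one chain per vbucket,
# [active_node, replica_node], replica left as None ("undefined") because the
# bucket has only one server. Parameters are taken from the config change
# above; the construction itself is illustrative, not ns_server code.

NUM_VBUCKETS = 256              # {num_vbuckets,256}
NUM_REPLICAS = 1                # {num_replicas,1}
SERVERS = ["ns_1@127.0.0.1"]    # {servers,['ns_1@127.0.0.1']}

def initial_map(num_vbuckets, num_replicas, servers):
    """One chain of length 1 + num_replicas per vbucket; unplaceable replica
    slots stay None, matching 'undefined' in the log."""
    chains = []
    for vb in range(num_vbuckets):
        active = servers[vb % len(servers)]
        chains.append([active] + [None] * num_replicas)
    return chains

vbucket_map = initial_map(NUM_VBUCKETS, NUM_REPLICAS, SERVERS)
assert len(vbucket_map) == 256
assert vbucket_map[0] == ["ns_1@127.0.0.1", None]
```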
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 23 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 24 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 25 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 26 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 27 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 28 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 29 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 30 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 31 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 32 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 33 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 34 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 35 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 36 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 37 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 38 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
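The long run of ns_janitor:do_sanify_chain lines above and below records the same decision once per vbucket: the map names 'ns_1@127.0.0.1' as the active copy, the vbucket does not exist on the node yet ("missing"), so the janitor switches it to active. The toy model below only mirrors that logged transition; it is not the ns_janitor implementation.

```python
# Toy model of the transition the surrounding log lines record: any vbucket
# whose chain puts its active copy on this node, but whose current state is
# "missing", gets flipped to "active". Illustrative only, not ns_janitor code.

def vbuckets_to_activate(current_states, vbucket_map, this_node):
    """Return vbucket ids that should go from missing to active on this_node."""
    result = []
    for vb, chain in enumerate(vbucket_map):
        if chain and chain[0] == this_node:
            if current_states.get(vb, "missing") == "missing":
                result.append(vb)
    return result

# A freshly created bucket has no vbuckets yet, so all 256 ids come back,
# matching the run of "Setting vbucket N ... from missing to active" lines.
chains = [["ns_1@127.0.0.1", None] for _ in range(256)]
states = {}  # nothing created on the node yet
ids = vbuckets_to_activate(states, chains, "ns_1@127.0.0.1")
print(ids[:3], "...", ids[-1])  # [0, 1, 2] ... 255
```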
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 39 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 40 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 41 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 42 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 43 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 44 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 45 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 46 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 47 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 48 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 49 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 50 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 51 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 52 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 53 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 54 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 55 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 56 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 57 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 58 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 59 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 60 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 61 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 62 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 63 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 64 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 65 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 66 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 67 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 68 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 69 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 70 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 71 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 72 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 73 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 74 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 75 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 76 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 77 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 78 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 79 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 80 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 81 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 82 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 83 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 84 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 85 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 86 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 87 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 88 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 89 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 90 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 91 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 92 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 93 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 94 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 95 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 96 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 97 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 98 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 99 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 100 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 101 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 102 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 103 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 104 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 105 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 106 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 107 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 108 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 109 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 110 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 111 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 112 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 113 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 114 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 115 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 116 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 117 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 118 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 119 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 120 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 121 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 122 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 123 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 124 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 125 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 126 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 127 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 128 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 129 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 130 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 131 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 132 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 133 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 134 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 135 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 136 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 137 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 138 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 139 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 140 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 141 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 142 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 143 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 144 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 145 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 146 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 147 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 148 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 149 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 150 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 151 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 152 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 153 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 154 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 155 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 156 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 157 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 158 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 159 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 160 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 161 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 162 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 163 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 164 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 165 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 166 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 167 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 168 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 169 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 170 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 171 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 172 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 173 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 174 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 175 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 176 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 177 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 178 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 179 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 180 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 181 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 182 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 183 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 184 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 185 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 186 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 187 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 188 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 189 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 190 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 191 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 192 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 193 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 194 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 195 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 196 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 197 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 198 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 199 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 200 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 201 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 202 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 203 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 204 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 205 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 206 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 207 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 208 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 209 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 210 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 211 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 212 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 213 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 214 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 215 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 216 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 217 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 218 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 219 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 220 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 221 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 222 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 223 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 224 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 225 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 226 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 227 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 228 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 229 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 230 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 231 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 232 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 233 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 234 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 235 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 236 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 237 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 238 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 239 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 240 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 241 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-03-26 1:03:56] [ns_1@127.0.0.1:<0.12457.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' from missing to active. 
[ns_doctor:info] [2012-03-26 1:03:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749035,798972}}, {outgoing_replications_safeness_level, [{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d",unknown}]}, {incoming_replications_conf_hashes, [{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d",[]}]}, {replication,[{"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d",1.0}]}, {active_buckets,["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}, {ready_buckets,["new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"]}, {local_tasks,[]}, {memory, [{total,36280984}, {processes,9950248}, {processes_used,8296112}, {system,26330736}, {atom,1298601}, {atom_used,1271280}, {binary,474112}, {code,12767707}, {ets,1509376}]}, {system_stats, [{cpu_utilization_rate,28.0}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,57}, {memory_data,{4040077312,4013039616,{<0.7.0>,142856}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25704 kB\nBuffers: 46968 kB\nCached: 3536432 kB\nSwapCached: 0 kB\nActive: 312884 kB\nInactive: 3439392 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25704 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 424 kB\nWriteback: 0 kB\nAnonPages: 168608 kB\nMapped: 24848 kB\nSlab: 134928 kB\nPageTables: 6568 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 652464 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3621306368}, {buffered_memory,48095232}, {free_memory,26320896}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{46535,1}}, {context_switches,{273568,0}}, {garbage_collection,{112597,99429480,0}}, {io,{{input,13841777},{output,5934788}}}, {reductions,{63898326,24508154}}, {run_queue,0}, {runtime,{8750,3010}}]}]}] [ns_server:error] [2012-03-26 1:04:00] [ns_1@127.0.0.1:<0.401.0>:couchbase_compaction_daemon:vbuckets_need_compaction:232] Couldn't open vbucket database `new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/48`: {not_found, no_db_file} [views:info] [2012-03-26 1:04:05] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':capi_set_view_manager:apply_map:445] Applying map to bucket new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 
111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}] [views:info] [2012-03-26 1:04:05] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':capi_set_view_manager:apply_map:450] Classified vbuckets for new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48, 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71, 72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94, 95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d"}, <0.12461.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:<0.12461.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.12462.0> [user:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_memcached:terminate:348] Shutting down bucket "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:06] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/master">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/0">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/1">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/10">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/100">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/101">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/102">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/103">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/104">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/105">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/106">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/107">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/108">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/109">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/11">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/110">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/111">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/112">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/113">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/114">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/115">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/116">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/117">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/118">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/119">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/12">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/120">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/121">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/122">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/123">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/124">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/125">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/126">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/127">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/128">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/129">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/13">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/130">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/131">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/132">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/133">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/134">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/135">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/136">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/137">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/138">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/139">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/14">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/140">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/141">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/142">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/143">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/144">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/145">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/146">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/147">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/148">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/149">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/15">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/150">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/151">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/152">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/153">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/154">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/155">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/156">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/157">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/158">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/159">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/16">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/160">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/161">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/162">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/163">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/164">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/165">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/166">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/167">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/168">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/169">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/17">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/170">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/171">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/172">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/173">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/174">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/175">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/176">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/177">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/178">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! 
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/179">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/18">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/180">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/181">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/182">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/183">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/184">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/185">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/186">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/187">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/188">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/189">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/19">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/190">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/191">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/192">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/193">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/194">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/195">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/196">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/197">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/198">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/199">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/2">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/20">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/200">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/201">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/202">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/203">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/204">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/205">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/206">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/207">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/208">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/209">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/21">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/210">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/211">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/212">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/213">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/214">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/215">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/216">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/217">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/218">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/219">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/22">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/220">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/221">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/222">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/223">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/224">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/225">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/226">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/227">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/228">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/229">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/23">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/230">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/231">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/232">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/233">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/234">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/235">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/236">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/237">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/238">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/239">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/24">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/240">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/241">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/242">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/243">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/244">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/245">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/246">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/247">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/248">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/249">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/25">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/250">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/251">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/252">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/253">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/254">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/255">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/26">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/27">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/28">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/29">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/3">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/30">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/31">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/32">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/33">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/34">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/35">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/36">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/37">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/38">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/39">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/4">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/40">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/41">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/42">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/43">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/44">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/45">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/46">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/47">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/48">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/49">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/5">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/50">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/51">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/52">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/53">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/54">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/55">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/56">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/57">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/58">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/59">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/6">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/60">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/61">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/62">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/63">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/64">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/65">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/66">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/67">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/68">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/69">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/7">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/70">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/71">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/72">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/73">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/74">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/75">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/76">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/77">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/78">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/79">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/8">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/80">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/81">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/82">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/83">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/84">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/85">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/86">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/87">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/88">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/89">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/9">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/90">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/91">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/92">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/93">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/94">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/95">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/96">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/97">>: ok [ns_server:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/98">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d/99">>: ok [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.12461.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" died with reason shutdown [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.15700.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.9815.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.12447.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.12447.0>:ns_port_server:log:166] moxi<0.12447.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.15701.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.15701.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.12157.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "new-bucket-da5acbe4-aedc-4bff-9a29-5fd767fc211d" [menelaus:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.15423.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 2 times in the past 63.082826 secs (last seen 50.40701 secs ago [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.15712.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:04:07] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.15716.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15719.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15732.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15733.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15734.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15735.0>}, 
{name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15736.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15738.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15740.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.15741.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.15718.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.15742.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [stats:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.15736.0>:stats_collector:handle_info:84] Stats for bucket "default": accepting_conns 1 auth_cmds 0 auth_errors 0 bucket_active_conns 1 bucket_conns 2 bytes 118515 bytes_read 67 bytes_written 48 cas_badval 0 cas_hits 0 cas_misses 0 cmd_flush 0 cmd_get 0 cmd_set 0 conn_yields 0 connection_structures 0 curr_connections 12 curr_items 0 curr_items_tot 0 daemon_connections 10 decr_hits 0 decr_misses 0 delete_hits 0 delete_misses 0 ep_allow_data_loss_during_shutdown 1 ep_alog_block_size 4096 ep_alog_path ep_alog_sleep_time 30 ep_backend couchdb ep_bg_fetch_delay 0 ep_bg_fetched 0 ep_cache_size 0 ep_chk_max_items 30000 ep_chk_period 3600 ep_chk_remover_stime 5 ep_commit_num 0 ep_commit_time 0 
ep_commit_time_total 0 ep_concurrentDB 1 ep_config_file ep_couch_bucket default ep_couch_default_batch_size 500 ep_couch_host localhost ep_couch_port 11213 ep_couch_reconnect_sleeptime 250 ep_couch_response_timeout 600000 ep_couch_vbucket_batch_count 4 ep_data_age 0 ep_data_age_highwat 0 ep_db_cleaner_status running ep_db_shards 4 ep_db_strategy multiDB ep_dbinit 0 ep_dbname /opt/couchbase/var/lib/couchdb/default ep_degraded_mode 0 ep_diskqueue_drain 0 ep_diskqueue_fill 0 ep_diskqueue_items 0 ep_diskqueue_memory 0 ep_diskqueue_pending 0 ep_exp_pager_stime 3600 ep_exp_pager_stime 3600 ep_expired 0 ep_expiry_window 3 ep_failpartialwarmup 0 ep_flush_all false ep_flush_duration 0 ep_flush_duration_highwat 0 ep_flush_duration_total 0 ep_flush_preempts 0 ep_flusher_state initializing ep_flusher_todo 0 ep_getl_default_timeout 15 ep_getl_max_timeout 30 ep_ht_locks 5 ep_ht_size 3079 ep_inconsistent_slave_chk 0 ep_initfile ep_io_num_read 0 ep_io_num_write 0 ep_io_read_bytes 0 ep_io_write_bytes 0 ep_item_begin_failed 0 ep_item_commit_failed 0 ep_item_flush_expired 0 ep_item_flush_failed 0 ep_item_num_based_new_chk 1 ep_items_rm_from_checkpoints 0 ep_keep_closed_chks 0 ep_klog_block_size 4096 ep_klog_compactor_queue_cap 500000 ep_klog_compactor_stime 3600 ep_klog_flush commit2 ep_klog_max_entry_ratio 10 ep_klog_max_log_size 2147483647 ep_klog_path ep_klog_sync commit2 ep_kv_size 0 ep_latency_arith_cmd 0 ep_latency_get_cmd 0 ep_max_checkpoints 2 ep_max_data_size 209715200 ep_max_item_size 20971520 ep_max_size 209715200 ep_max_txn_size 10000 ep_max_vbuckets 256 ep_mem_high_wat 157286400 ep_mem_high_wat 18446744073709551615 ep_mem_low_wat 125829120 ep_mem_low_wat 18446744073709551615 ep_min_data_age 0 ep_mlog_compactor_runs 0 ep_mutation_mem_threshold 0 ep_num_active_non_resident 0 ep_num_checkpoint_remover_runs 0 ep_num_eject_failures 0 ep_num_eject_replicas 0 ep_num_expiry_pager_runs 0 ep_num_non_resident 0 ep_num_not_my_vbuckets 0 ep_num_pager_runs 0 ep_num_value_ejects 0 ep_obs_reg_clean_job 0 ep_observe_calls 0 ep_observe_errors 0 ep_observe_registry_size 0 ep_oom_errors 0 ep_overhead 1312 ep_pending_ops 0 ep_pending_ops_max 0 ep_pending_ops_max_duration 0 ep_pending_ops_total 0 ep_postInitfile ep_queue_age_cap 900 ep_queue_size 0 ep_restore_file_checks 1 ep_restore_mode 0 ep_shardpattern %d/%b-%i.sqlite ep_stats_observe_polls 0 ep_storage_age 0 ep_storage_age_highwat 0 ep_storage_type featured ep_store_max_concurrency 10 ep_store_max_readers 9 ep_store_max_readwrite 1 ep_stored_val_type ep_tap_ack_grace_period 300 ep_tap_ack_initial_sequence_number 1 ep_tap_ack_interval 1000 ep_tap_ack_window_size 10 ep_tap_backfill_resident 0.9 ep_tap_backlog_limit 5000 ep_tap_backoff_period 5 ep_tap_bg_fetch_requeued 0 ep_tap_bg_fetched 0 ep_tap_bg_max_pending 500 ep_tap_conn_map_notifications 0 ep_tap_keepalive 300 ep_tap_noop_interval 20 ep_tap_requeue_sleep_time 0.1 ep_tap_throttle_queue_cap 1000000 ep_tap_throttle_threshold 90 ep_tmp_item_expiry_window 3600 ep_tmp_oom_errors 0 ep_too_old 0 ep_too_young 0 ep_total_cache_size 0 ep_total_del_items 0 ep_total_enqueued 0 ep_total_new_items 0 ep_total_observe_sets 0 ep_total_persisted 0 ep_uncommitted_items 0 ep_unobserve_calls 0 ep_value_size 0 ep_vb0 0 ep_vb_chunk_del_time 500 ep_vb_del_chunk_size 100 ep_vb_total 0 ep_vbucket_del 0 ep_vbucket_del_fail 0 ep_version 1.8.1r_467_g931cd34 ep_waitforwarmup 0 ep_warmup 1 ep_warmup_batch_size 1000 ep_warmup_min_items_threshold 10 ep_warmup_min_memory_threshold 10 get_hits 0 get_misses 0 incr_hits 0 incr_misses 0 libevent 
2.0.11-stable limit_maxbytes 67108864 listen_disabled_num 0 mem_used 118515 pid 2631 pointer_size 64 rejected_conns 0 rusage_system 1.091834 rusage_user 5.310192 threads 4 time 1332749046 total_connections 33 uptime 69 vb_active_curr_items 0 vb_active_eject 0 vb_active_ht_memory 0 vb_active_itm_memory 0 vb_active_num 0 vb_active_num_non_resident 0 vb_active_ops_create 0 vb_active_ops_delete 0 vb_active_ops_reject 0 vb_active_ops_update 0 vb_active_perc_mem_resident 0 vb_active_queue_age 0 vb_active_queue_drain 0 vb_active_queue_fill 0 vb_active_queue_memory 0 vb_active_queue_pending 0 vb_active_queue_size 0 vb_dead_num 0 vb_pending_curr_items 0 vb_pending_eject 0 vb_pending_ht_memory 0 vb_pending_itm_memory 0 vb_pending_num 0 vb_pending_num_non_resident 0 vb_pending_ops_create 0 vb_pending_ops_delete 0 vb_pending_ops_reject 0 vb_pending_ops_update 0 vb_pending_perc_mem_resident 0 vb_pending_queue_age 0 vb_pending_queue_drain 0 vb_pending_queue_fill 0 vb_pending_queue_memory 0 vb_pending_queue_pending 0 vb_pending_queue_size 0 vb_replica_curr_items 0 vb_replica_eject 0 vb_replica_ht_memory 0 vb_replica_itm_memory 0 vb_replica_num 0 vb_replica_num_non_resident 0 vb_replica_ops_create 0 vb_replica_ops_delete 0 vb_replica_ops_reject 0 vb_replica_ops_update 0 vb_replica_perc_mem_resident 0 vb_replica_queue_age 0 vb_replica_queue_drain 0 vb_replica_queue_fill 0 vb_replica_queue_memory 0 vb_replica_queue_pending 0 vb_replica_queue_size 0 version 1.4.4_516_g2f7d183 [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 
in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in 
<<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 146 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in 
<<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 
192 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error 
opening vb 212 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: 
Error opening vb 232 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC 
daemon: Error opening vb 252 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found, no_db_file}
[views:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}]
[views:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[error_logger:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.15756.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}]
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.15716.0>, supervisor, [single_bucket_sup]}
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.15716.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.15717.0>
[user:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"default\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 2 times in the past 51.615724 secs (last seen 39.027897 secs ago
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}]
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw"
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:<0.15702.0>:ns_port_server:log:166]
moxi<0.15702.0>: 2012-03-26 01:04:07: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13)
moxi<0.15702.0>: 2012-03-26 01:04:07: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
moxi<0.15702.0>: 2012-03-26 01:04:09: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({
moxi<0.15702.0>: "name": "default",
moxi<0.15702.0>: "nodeLocator": "vbucket",
moxi<0.15702.0>: "saslPassword": "",
moxi<0.15702.0>: "nodes": [{
moxi<0.15702.0>: "couchApiBase": "http://127.0.0.1:8092/default",
moxi<0.15702.0>: "replication": 0,
moxi<0.15702.0>: "clusterMembership": "active",
moxi<0.15702.0>: "status": "warmup",
moxi<0.15702.0>: "thisNode": true,
moxi<0.15702.0>: "hostname": "127.0.0.1:8091",
moxi<0.15702.0>: "clusterCompatibility": 1,
moxi<0.15702.0>: "version": "2.0.0r-944-rel-enterprise",
moxi<0.15702.0>: "os": "x86_64-unknown-linux-gnu",
moxi<0.15702.0>: "ports": {
moxi<0.15702.0>: "proxy": 11211,
moxi<0.15702.0>: "direct": 11210
moxi<0.15702.0>: }
moxi<0.15702.0>: }],
moxi<0.15702.0>: "vBucketServerMap": {
moxi<0.15702.0>: "hashAlgorithm": "CRC",
moxi<0.15702.0>: "numReplicas": 0,
moxi<0.15702.0>: "serverList": ["127.0.0.1:11210"],
moxi<0.15702.0>: "vBucketMap": []
moxi<0.15702.0>: }
moxi<0.15702.0>: })
[ns_server:info] [2012-03-26 1:04:07] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166]
memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.366.0>: Connected to mccouch: "localhost:11213"
memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.366.0>: Connected to mccouch: "localhost:11213"
memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine
memcached<0.366.0>: Failed to load mutation log, falling back to key dump
memcached<0.366.0>: metadata loaded in 173 usec
memcached<0.366.0>: warmup completed in 269 usec
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.15716.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166]
memcached<0.366.0>: Shutting down tap connections!
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.16033.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]}
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.12174.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.15702.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.15702.0>:ns_port_server:log:166]
moxi<0.15702.0>: EOL on stdin. Exiting
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.16034.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000
[menelaus:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:<0.15427.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default"
[error_logger:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.16034.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}]
[ns_server:info] [2012-03-26 1:04:08] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 2 times in the past 51.645673 secs (last seen 39.193914 secs ago
[menelaus:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:<0.12167.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 3 times in the past 64.438872 secs (last seen 1.356046 secs ago
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:<0.16044.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}]
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw"
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]}
[error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.16047.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}]
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}]
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;"
[error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16050.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}]
[error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16062.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}]
[error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16063.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}]
[error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16064.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}]
[error_logger:info] [2012-03-26 1:04:09]
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16065.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16066.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16068.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16070.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16071.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16049.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16072.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 
1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 146 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found, no_db_file}
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:<0.16035.0>:ns_port_server:log:166]
moxi<0.16035.0>: 2012-03-26 01:04:08: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13)
moxi<0.16035.0>: 2012-03-26 01:04:08: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
moxi<0.16035.0>: 2012-03-26 01:04:10: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({
moxi<0.16035.0>: "name": "default",
moxi<0.16035.0>: "nodeLocator": "vbucket",
moxi<0.16035.0>: "saslPassword": "",
moxi<0.16035.0>: "nodes": [{
moxi<0.16035.0>: "couchApiBase": "http://127.0.0.1:8092/default",
moxi<0.16035.0>: "replication": 0,
moxi<0.16035.0>: "clusterMembership": "active",
moxi<0.16035.0>: "status": "warmup",
moxi<0.16035.0>: "thisNode": true,
moxi<0.16035.0>: "hostname": "127.0.0.1:8091",
moxi<0.16035.0>: "clusterCompatibility": 1,
moxi<0.16035.0>: "version": "2.0.0r-944-rel-enterprise",
moxi<0.16035.0>: "os": "x86_64-unknown-linux-gnu",
moxi<0.16035.0>: "ports": {
moxi<0.16035.0>: "proxy": 11211,
moxi<0.16035.0>: "direct": 11210
moxi<0.16035.0>: }
moxi<0.16035.0>: }],
moxi<0.16035.0>: "vBucketServerMap": {
moxi<0.16035.0>: "hashAlgorithm": "CRC",
moxi<0.16035.0>: "numReplicas": 1,
moxi<0.16035.0>: "serverList": ["127.0.0.1:11210"],
moxi<0.16035.0>: "vBucketMap": []
moxi<0.16035.0>: }
moxi<0.16035.0>: })
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>:
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in 
<<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 
235 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error 
opening vb 255 in <<"default">>: {not_found, no_db_file}
[views:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}]
[views:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[error_logger:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16086.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}]
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.16047.0>, supervisor, [single_bucket_sup]}
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:<0.16047.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.16048.0>
[user:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"default\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 3 times in the past 53.050999 secs (last seen 1.435275 secs ago
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}]
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw"
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:info] [2012-03-26 1:04:09] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166]
memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.366.0>: Connected to mccouch: "localhost:11213"
memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.366.0>: Connected to mccouch: "localhost:11213"
memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine
memcached<0.366.0>: Failed to load mutation log, falling back to key dump
memcached<0.366.0>: metadata loaded in 184 usec
memcached<0.366.0>: warmup completed in 282 usec
[ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok
[ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16047.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown
[ns_server:info]
[2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16368.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.12161.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16035.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16035.0>:ns_port_server:log:166] moxi<0.16035.0>: EOL on stdin. Exiting [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16369.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.16369.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.15704.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 3 times in the past 52.793426 secs (last seen 1.147753 secs ago [menelaus:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16037.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:04:10] 
[ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 4 times in the past 65.503254 secs (last seen 1.064382 secs ago [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16379.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,2}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.16383.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,2}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16386.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:04:10] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16394.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16397.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16398.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16399.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16400.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16402.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16404.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16405.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16385.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, 
{restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16406.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:10] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found, no_db_file}
[the identical entry "MC daemon: Error opening vb N in <<"default">>: {not_found, no_db_file}" is logged once per vbucket, for vbuckets 98 through 254, all at 1:04:10]
[couchdb:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found, no_db_file}
[error_logger:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16420.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}]
[views:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}]
[views:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166]
memcached<0.366.0>: Shutting down tap connections!
memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.366.0>: Connected to mccouch: "localhost:11213"
memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.366.0>: Connected to mccouch: "localhost:11213"
memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine
memcached<0.366.0>: Failed to load mutation log, falling back to key dump
memcached<0.366.0>: metadata loaded in 932 usec
memcached<0.366.0>: warmup completed in 1005 usec
[ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16370.0>:ns_port_server:log:166]
moxi<0.16370.0>: 2012-03-26 01:04:10: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13)
moxi<0.16370.0>: 2012-03-26 01:04:10: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
moxi<0.16370.0>: 2012-03-26 01:04:12: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({
moxi<0.16370.0>: "name": "default",
moxi<0.16370.0>: "nodeLocator": "vbucket",
moxi<0.16370.0>: "saslPassword": "",
moxi<0.16370.0>: "nodes": [{
moxi<0.16370.0>: "couchApiBase": "http://127.0.0.1:8092/default",
moxi<0.16370.0>: "replication": 0,
moxi<0.16370.0>: "clusterMembership": "active",
moxi<0.16370.0>: "status": "warmup",
moxi<0.16370.0>: "thisNode": true,
moxi<0.16370.0>: "hostname": "127.0.0.1:8091",
moxi<0.16370.0>: "clusterCompatibility": 1,
moxi<0.16370.0>: "version": "2.0.0r-944-rel-enterprise",
moxi<0.16370.0>: "os": "x86_64-unknown-linux-gnu",
moxi<0.16370.0>: "ports": {
moxi<0.16370.0>: "proxy": 11211,
moxi<0.16370.0>: "direct": 11210
moxi<0.16370.0>: }
moxi<0.16370.0>: }],
moxi<0.16370.0>: "vBucketServerMap": {
moxi<0.16370.0>: "hashAlgorithm": "CRC",
moxi<0.16370.0>: "numReplicas": 2,
moxi<0.16370.0>: "serverList": ["127.0.0.1:11210"],
moxi<0.16370.0>: "vBucketMap": []
moxi<0.16370.0>: }
moxi<0.16370.0>: })
[ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown
[ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.16383.0>, supervisor, [single_bucket_sup]}
[ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:<0.16383.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.16384.0>
[user:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion
[ns_server:info]
[2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"default\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 4 times in the past 54.116527 secs (last seen 1.065528 secs ago [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16383.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16697.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.15431.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16370.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16370.0>:ns_port_server:log:166] moxi<0.16370.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16698.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [menelaus:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16039.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.16698.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 4 times in the past 54.056673 secs (last seen 1.263247 secs ago [menelaus:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16017.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 5 times in the past 66.780796 secs (last seen 1.277542 secs ago [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16708.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,3}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new 
child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.16711.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,3}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16714.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16726.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16727.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16728.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:11] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16729.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16730.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16732.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16734.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.16735.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16713.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16736.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file}
[the identical entry "MC daemon: Error opening vb N in <<"default">>: {not_found, no_db_file}" is logged once per vbucket, for vbuckets 5 through 162, all at 1:04:11]
[couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in 
<<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 
243 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found, no_db_file} [error_logger:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.16750.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [views:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16699.0>:ns_port_server:log:166] moxi<0.16699.0>: 2012-03-26 01:04:11: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.16699.0>: 2012-03-26 01:04:11: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) moxi<0.16699.0>: 2012-03-26 01:04:13: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.16699.0>: "name": "default", 
moxi<0.16699.0>: "nodeLocator": "vbucket", moxi<0.16699.0>: "saslPassword": "", moxi<0.16699.0>: "nodes": [{ moxi<0.16699.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.16699.0>: "replication": 0, moxi<0.16699.0>: "clusterMembership": "active", moxi<0.16699.0>: "status": "warmup", moxi<0.16699.0>: "thisNode": true, moxi<0.16699.0>: "hostname": "127.0.0.1:8091", moxi<0.16699.0>: "clusterCompatibility": 1, moxi<0.16699.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.16699.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.16699.0>: "ports": { moxi<0.16699.0>: "proxy": 11211, moxi<0.16699.0>: "direct": 11210 moxi<0.16699.0>: } moxi<0.16699.0>: }], moxi<0.16699.0>: "vBucketServerMap": { moxi<0.16699.0>: "hashAlgorithm": "CRC", moxi<0.16699.0>: "numReplicas": 3, moxi<0.16699.0>: "serverList": ["127.0.0.1:11210"], moxi<0.16699.0>: "vBucketMap": [] moxi<0.16699.0>: } moxi<0.16699.0>: }) [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.16711.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:<0.16711.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.16712.0> [user:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"default\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 5 times in the past 55.390831 secs (last seen 1.274304 secs ago [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:11] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: metadata loaded in 184 usec memcached<0.366.0>: warmup completed in 275 usec [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.16711.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown 
[ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.17029.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.15706.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.16699.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.16699.0>:ns_port_server:log:166] moxi<0.16699.0>: EOL on stdin. Exiting [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.17030.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [menelaus:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.16371.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.17030.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 5 times in the past 55.341288 secs (last seen 1.284615 secs ago [menelaus:info] [2012-03-26 
1:04:12] [ns_1@127.0.0.1:<0.16703.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" of type: membase [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.17042.0>:ns_janitor:wait_for_memcached:278] Waiting for "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,0}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.17043.0>}, {name, {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [],13211,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", "url=http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", "-Z", "port_listen=13211,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.17047.0>}, {name, {per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {single_bucket_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {single_bucket_sup, start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,0}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, 
{moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:ensure_bucket:700] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17050.0>}, {name, {ns_memcached,stats, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17062.0>}, {name, {ns_memcached,data, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17063.0>}, {name, {ns_vbm_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17064.0>}, {name, {ns_vbm_new_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_new_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17065.0>}, {name, {couch_stats_reader, 
"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {couch_stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17066.0>}, {name, {stats_collector, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_collector,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17068.0>}, {name, {stats_archiver, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_archiver,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-minute' [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17070.0>}, {name, {stats_reader, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17071.0>}, {name, {failover_safeness_level, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {failover_safeness_level,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17049.0>}, {name, {ns_memcached_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17073.0>}, {name, {capi_ddoc_replication_srv, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_ddoc_replication_srv,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, 
{shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-minute', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,777660},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,336,<0.17074.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-minute', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,777660},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,336,<0.17074.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-hour' [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 0 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 1 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 2 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 3 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 4 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 5 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 6 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 7 in 
<<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 8 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 9 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 10 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 11 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 12 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 13 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 14 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 15 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 16 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 17 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [stats:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.17066.0>:stats_collector:handle_info:84] Stats for bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276": accepting_conns 1 auth_cmds 0 auth_errors 0 bucket_active_conns 1 bucket_conns 2 bytes 118503 bytes_read 24 bytes_written 48 cas_badval 0 cas_hits 0 cas_misses 0 cmd_flush 0 cmd_get 0 cmd_set 0 conn_yields 0 connection_structures 0 curr_connections 12 curr_items 0 curr_items_tot 0 daemon_connections 10 decr_hits 0 decr_misses 0 delete_hits 0 delete_misses 0 ep_allow_data_loss_during_shutdown 1 ep_alog_block_size 4096 ep_alog_path ep_alog_sleep_time 30 ep_backend couchdb ep_bg_fetch_delay 0 ep_bg_fetched 0 ep_cache_size 0 ep_chk_max_items 30000 ep_chk_period 3600 ep_chk_remover_stime 5 ep_commit_num 0 ep_commit_time 0 ep_commit_time_total 0 ep_concurrentDB 1 ep_config_file ep_couch_bucket new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276 
ep_couch_default_batch_size 500 ep_couch_host localhost ep_couch_port 11213 ep_couch_reconnect_sleeptime 250 ep_couch_response_timeout 600000 ep_couch_vbucket_batch_count 4 ep_data_age 0 ep_data_age_highwat 0 ep_db_cleaner_status running ep_db_shards 4 ep_db_strategy multiDB ep_dbinit 0 ep_dbname /opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276 ep_degraded_mode 0 ep_diskqueue_drain 0 ep_diskqueue_fill 0 ep_diskqueue_items 0 ep_diskqueue_memory 0 ep_diskqueue_pending 0 ep_exp_pager_stime 3600 ep_exp_pager_stime 3600 ep_expired 0 ep_expiry_window 3 ep_failpartialwarmup 0 ep_flush_all false ep_flush_duration 0 ep_flush_duration_highwat 0 ep_flush_duration_total 0 ep_flush_preempts 0 ep_flusher_state initializing ep_flusher_todo 0 ep_getl_default_timeout 15 ep_getl_max_timeout 30 ep_ht_locks 5 ep_ht_size 3079 ep_inconsistent_slave_chk 0 ep_initfile ep_io_num_read 0 ep_io_num_write 0 ep_io_read_bytes 0 ep_io_write_bytes 0 ep_item_begin_failed 0 ep_item_commit_failed 0 ep_item_flush_expired 0 ep_item_flush_failed 0 ep_item_num_based_new_chk 1 ep_items_rm_from_checkpoints 0 ep_keep_closed_chks 0 ep_klog_block_size 4096 ep_klog_compactor_queue_cap 500000 ep_klog_compactor_stime 3600 ep_klog_flush commit2 ep_klog_max_entry_ratio 10 ep_klog_max_log_size 2147483647 ep_klog_path ep_klog_sync commit2 ep_kv_size 0 ep_latency_arith_cmd 0 ep_latency_get_cmd 0 ep_max_checkpoints 2 ep_max_data_size 209715200 ep_max_item_size 20971520 ep_max_size 209715200 ep_max_txn_size 10000 ep_max_vbuckets 256 ep_mem_high_wat 157286400 ep_mem_high_wat 18446744073709551615 ep_mem_low_wat 125829120 ep_mem_low_wat 18446744073709551615 ep_min_data_age 0 ep_mlog_compactor_runs 0 ep_mutation_mem_threshold 0 ep_num_active_non_resident 0 ep_num_checkpoint_remover_runs 0 ep_num_eject_failures 0 ep_num_eject_replicas 0 ep_num_expiry_pager_runs 0 ep_num_non_resident 0 ep_num_not_my_vbuckets 0 ep_num_pager_runs 0 ep_num_value_ejects 0 ep_obs_reg_clean_job 0 ep_observe_calls 0 ep_observe_errors 0 ep_observe_registry_size 0 ep_oom_errors 0 ep_overhead 1312 ep_pending_ops 0 ep_pending_ops_max 0 ep_pending_ops_max_duration 0 ep_pending_ops_total 0 ep_postInitfile ep_queue_age_cap 900 ep_queue_size 0 ep_restore_file_checks 1 ep_restore_mode 0 ep_shardpattern %d/%b-%i.sqlite ep_stats_observe_polls 0 ep_storage_age 0 ep_storage_age_highwat 0 ep_storage_type featured ep_store_max_concurrency 10 ep_store_max_readers 9 ep_store_max_readwrite 1 ep_stored_val_type ep_tap_ack_grace_period 300 ep_tap_ack_initial_sequence_number 1 ep_tap_ack_interval 1000 ep_tap_ack_window_size 10 ep_tap_backfill_resident 0.9 ep_tap_backlog_limit 5000 ep_tap_backoff_period 5 ep_tap_bg_fetch_requeued 0 ep_tap_bg_fetched 0 ep_tap_bg_max_pending 500 ep_tap_conn_map_notifications 0 ep_tap_keepalive 300 ep_tap_noop_interval 20 ep_tap_requeue_sleep_time 0.1 ep_tap_throttle_queue_cap 1000000 ep_tap_throttle_threshold 90 ep_tmp_item_expiry_window 3600 ep_tmp_oom_errors 0 ep_too_old 0 ep_too_young 0 ep_total_cache_size 0 ep_total_del_items 0 ep_total_enqueued 0 ep_total_new_items 0 ep_total_observe_sets 0 ep_total_persisted 0 ep_uncommitted_items 0 ep_unobserve_calls 0 ep_value_size 0 ep_vb0 0 ep_vb_chunk_del_time 500 ep_vb_del_chunk_size 100 ep_vb_total 0 ep_vbucket_del 0 ep_vbucket_del_fail 0 ep_version 1.8.1r_467_g931cd34 ep_waitforwarmup 0 ep_warmup 1 ep_warmup_batch_size 1000 ep_warmup_min_items_threshold 10 ep_warmup_min_memory_threshold 10 get_hits 0 get_misses 0 incr_hits 0 incr_misses 0 libevent 2.0.11-stable limit_maxbytes 67108864 
listen_disabled_num 0 mem_used 118503 pid 2631 pointer_size 64 rejected_conns 0 rusage_system 1.307801 rusage_user 9.114614 threads 4 time 1332749051 total_connections 41 uptime 74 vb_active_curr_items 0 vb_active_eject 0 vb_active_ht_memory 0 vb_active_itm_memory 0 vb_active_num 0 vb_active_num_non_resident 0 vb_active_ops_create 0 vb_active_ops_delete 0 vb_active_ops_reject 0 vb_active_ops_update 0 vb_active_perc_mem_resident 0 vb_active_queue_age 0 vb_active_queue_drain 0 vb_active_queue_fill 0 vb_active_queue_memory 0 vb_active_queue_pending 0 vb_active_queue_size 0 vb_dead_num 0 vb_pending_curr_items 0 vb_pending_eject 0 vb_pending_ht_memory 0 vb_pending_itm_memory 0 vb_pending_num 0 vb_pending_num_non_resident 0 vb_pending_ops_create 0 vb_pending_ops_delete 0 vb_pending_ops_reject 0 vb_pending_ops_update 0 vb_pending_perc_mem_resident 0 vb_pending_queue_age 0 vb_pending_queue_drain 0 vb_pending_queue_fill 0 vb_pending_queue_memory 0 vb_pending_queue_pending 0 vb_pending_queue_size 0 vb_replica_curr_items 0 vb_replica_eject 0 vb_replica_ht_memory 0 vb_replica_itm_memory 0 vb_replica_num 0 vb_replica_num_non_resident 0 vb_replica_ops_create 0 vb_replica_ops_delete 0 vb_replica_ops_reject 0 vb_replica_ops_update 0 vb_replica_perc_mem_resident 0 vb_replica_queue_age 0 vb_replica_queue_drain 0 vb_replica_queue_fill 0 vb_replica_queue_memory 0 vb_replica_queue_pending 0 vb_replica_queue_size 0 version 1.4.4_516_g2f7d183 [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 18 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 19 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 20 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 21 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 22 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 23 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 24 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 25 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 26 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 27 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 28 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 29 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 30 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 31 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-hour', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,785863},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,337,<0.17100.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-hour', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-hour'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,785863},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,337,<0.17100.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-day' [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 32 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 33 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 34 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 35 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 36 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 37 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 38 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 39 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 40 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 41 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 42 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 43 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 44 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 45 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 46 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 47 in 
<<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 48 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 49 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 50 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 51 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 52 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 53 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 54 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 55 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 56 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 57 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 58 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 59 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 60 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 61 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 62 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 63 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 64 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 65 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 66 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 67 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 68 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 69 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 70 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 71 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 72 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 73 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 74 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 75 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 76 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 77 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-day', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,802777},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,339,<0.17139.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-day', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-day'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,802777},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,339,<0.17139.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-week' [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 78 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 79 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 80 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 81 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 82 in 
<<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 83 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 84 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 85 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 86 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 87 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 88 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 89 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 90 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 91 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 92 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 93 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 94 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 95 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 96 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 97 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 98 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 99 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 100 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 101 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 102 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 103 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 104 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 105 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 106 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 107 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-week', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, 
{record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,816263},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,340,<0.17192.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-week', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-week'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,816263},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,340,<0.17192.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-month' [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 108 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 109 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 110 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 111 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 112 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 113 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 114 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 115 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 116 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 117 in 
<<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 118 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 119 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 120 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 121 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 122 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 123 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 124 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 125 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 126 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 127 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 128 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 129 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 130 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 131 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 132 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 133 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 134 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 135 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 136 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 137 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 138 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-month', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,823358},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,341,<0.17229.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-month', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-month'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,823358},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,341,<0.17229.0>}} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 
'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-year' [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 139 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 140 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 141 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 142 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 143 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 144 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 145 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 146 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 147 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 148 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 149 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 150 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 151 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 152 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 153 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 154 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 155 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 156 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 157 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 158 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 159 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 160 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 161 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 162 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 163 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 164 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 165 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 166 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 167 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 168 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 169 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 170 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 171 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 172 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 173 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 174 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 175 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 176 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-year', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,834798},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,342,<0.17267.0>}} [ns_server:info] 
[2012-03-26 1:04:12] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-year', [{name,'stats_archiver-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276-year'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749052,834798},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,342,<0.17267.0>}} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 177 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 178 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 179 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 180 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 181 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 182 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 183 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 184 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 185 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 186 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 187 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 188 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 189 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 190 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 191 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 192 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 193 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 194 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 195 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 196 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 197 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 198 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 199 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 200 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 201 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 202 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 203 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 204 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 205 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 206 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 207 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 208 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 209 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 210 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 211 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 212 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 213 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 214 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 215 
in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 216 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 217 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 218 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 219 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 220 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 221 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 222 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 223 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 224 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 225 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 226 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 227 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 228 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 229 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 230 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 231 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 232 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 233 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 234 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 235 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 236 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 237 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 238 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 239 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 240 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 241 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 242 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 243 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 244 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 245 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 246 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 247 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 248 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 249 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 250 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 251 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 252 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 253 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 254 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [views:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:445] Applying map to bucket new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [couchdb:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC 
daemon: Error opening vb 255 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [views:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:450] Classified vbuckets for new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17093.0>}, {name, {capi_set_view_manager, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_set_view_manager,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with reason shutdown [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, <0.17047.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.17047.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.17048.0> [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091} [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.17043.0>:ns_port_server:log:166] moxi<0.17043.0>: 2012-03-26 01:04:12: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (47) moxi<0.17043.0>: 2012-03-26 01:04:14: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13211,"replicaIndex":true,"uri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","streamingUri":"/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","flushCacheUri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-rel-enterprise","os":"x86_64-unknown-linux-gnu","ports":{"prox [user:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:terminate:348] Shutting down bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.16704.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:04:12] 
[ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:<0.17031.0>:ns_port_server:log:166] moxi<0.17031.0>: 2012-03-26 01:04:12: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.17031.0>: 2012-03-26 01:04:12: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [ns_server:info] [2012-03-26 1:04:12] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Warning: Data diretory does not exist, /opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276 memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276 memcached<0.366.0>: metadata loaded in 231 usec memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276 memcached<0.366.0>: warmup completed in 342 usec [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/master">>: ok [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.17047.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" died with reason shutdown [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.17406.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.17031.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.17031.0>:ns_port_server:log:166] moxi<0.17031.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.17407.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [ns_server:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.16347.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [error_logger:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.17407.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:04:13] [ns_1@127.0.0.1:<0.17013.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:<0.17419.0>:ns_janitor:wait_for_memcached:278] Waiting for "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [menelaus:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:<0.17032.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" of type: membase [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\" of type: membase\n") because it's been seen 1 times in the past 1.28941 secs (last seen 1.28941 secs ago [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091} [ns_server:info] [2012-03-26 1:04:14] 
[ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {single_bucket_sup, start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.17423.0>}, {name, {per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {single_bucket_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.17422.0>}, {name, {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [],13211,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", "url=http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", "-Z", "port_listen=13211,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:ensure_bucket:700] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] 
[2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17427.0>}, {name, {ns_memcached,stats, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17438.0>}, {name, {ns_memcached,data, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17439.0>}, {name, {ns_vbm_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17440.0>}, {name, {ns_vbm_new_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_new_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17441.0>}, {name, {couch_stats_reader, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {couch_stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17442.0>}, {name, {stats_collector, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_collector,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17444.0>}, {name, {stats_archiver, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, 
{stats_archiver,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17446.0>}, {name, {stats_reader, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17447.0>}, {name, {failover_safeness_level, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {failover_safeness_level,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17426.0>}, {name, {ns_memcached_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17448.0>}, {name, {capi_ddoc_replication_srv, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_ddoc_replication_srv,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 0 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 1 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 2 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 3 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 4 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file}
[couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 5 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file}
(... the same "MC daemon: Error opening vb N in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file}" entry repeats at [2012-03-26 1:04:14] for every vbucket N from 6 through 167; only the vbucket number changes ...)
[couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 168 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>:
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 169 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 170 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 171 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 172 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 173 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 174 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 175 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 176 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 177 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 178 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 179 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 180 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 181 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 182 
in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 183 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 184 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 185 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 186 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 187 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 188 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 189 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 190 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 191 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 192 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 193 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 194 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 195 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 196 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 197 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 198 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 199 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 200 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 201 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 202 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 203 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 204 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 205 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 206 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 207 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 208 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 209 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 210 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 211 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 212 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 213 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 214 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 215 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 216 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 217 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 218 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 219 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 220 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 221 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 222 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 223 
in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 224 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 225 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 226 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 227 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 228 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 229 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 230 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 231 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 232 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 233 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 234 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 235 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 236 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 237 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 238 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 239 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 240 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 241 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 242 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 243 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 244 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 245 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 246 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 247 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 248 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 249 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 250 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 251 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 252 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 253 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 254 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 255 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [views:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:445] Applying map to bucket new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:450] Classified vbuckets for new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17463.0>}, {name, {capi_set_view_manager, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_set_view_manager,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:<0.17408.0>:ns_port_server:log:166] moxi<0.17408.0>: 2012-03-26 01:04:13: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.17408.0>: 2012-03-26 01:04:13: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with reason shutdown [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, <0.17423.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:<0.17423.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.17424.0> [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091} [user:info] [2012-03-26 
1:04:14] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:terminate:348] Shutting down bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:<0.17422.0>:ns_port_server:log:166] moxi<0.17422.0>: 2012-03-26 01:04:14: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (47) moxi<0.17422.0>: 2012-03-26 01:04:16: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13211,"replicaIndex":true,"uri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","streamingUri":"/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","flushCacheUri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-rel-enterprise","os":"x86_64-unknown-linux-gnu","ports":{"prox [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 1 times in the past 1.358326 secs (last seen 1.358326 secs ago [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:<0.17033.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:14] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: metadata loaded in 821 usec memcached<0.366.0>: warmup completed in 985 usec [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/master">>: ok [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17423.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for 
"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" died with reason shutdown [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17743.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.16373.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17408.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17408.0>:ns_port_server:log:166] moxi<0.17408.0>: EOL on stdin. Exiting [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17744.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [menelaus:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17035.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.17744.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\"\n") because it's been seen 1 times in the past 1.084144 secs (last seen 1.084144 secs ago [menelaus:info] 
[2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17415.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" of type: membase [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\" of type: membase\n") because it's been seen 2 times in the past 2.373204 secs (last seen 1.083794 secs ago [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17756.0>:ns_janitor:wait_for_memcached:278] Waiting for "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091} [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.17758.0>}, {name, {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [],13211,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", "url=http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", "-Z", "port_listen=13211,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,2}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {single_bucket_sup, start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.17761.0>}, {name, {per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {single_bucket_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:15] 
[ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,2}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:ensure_bucket:700] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17764.0>}, {name, {ns_memcached,stats, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17774.0>}, {name, {ns_memcached,data, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17775.0>}, {name, {ns_vbm_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17776.0>}, {name, {ns_vbm_new_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_new_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, 
{child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17777.0>}, {name, {couch_stats_reader, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {couch_stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17778.0>}, {name, {stats_collector, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_collector,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17780.0>}, {name, {stats_archiver, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_archiver,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17782.0>}, {name, {stats_reader, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17783.0>}, {name, {failover_safeness_level, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {failover_safeness_level,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17763.0>}, {name, {ns_memcached_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17784.0>}, {name, {capi_ddoc_replication_srv, 
"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_ddoc_replication_srv,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 0 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 1 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 2 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 3 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 4 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 5 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 6 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 7 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 8 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 9 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 10 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 11 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 12 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 13 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file}
[... the same couch_log:error:42 record ("MC daemon: Error opening vb N in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file}") is logged at 2012-03-26 1:04:15 for every vbucket from vb 14 through vb 255 ...]
[views:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:445] Applying map to bucket new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}]
[views:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:450] Classified vbuckets for new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[error_logger:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.17800.0>}, {name, {capi_set_view_manager, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_set_view_manager,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}]
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: metadata loaded in 341 usec memcached<0.366.0>: warmup completed in 463 usec
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17745.0>:ns_port_server:log:166] moxi<0.17745.0>: 2012-03-26 01:04:15: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.17745.0>: 2012-03-26 01:04:15: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with reason shutdown
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, <0.17761.0>, supervisor, [single_bucket_sup]}
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17761.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.17762.0>
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091}
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17758.0>:ns_port_server:log:166] moxi<0.17758.0>: 2012-03-26 01:04:15: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (47) moxi<0.17758.0>: 2012-03-26 01:04:17: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13211,"replicaIndex":true,"uri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","streamingUri":"/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","flushCacheUri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-rel-enterprise","os":"x86_64-unknown-linux-gnu","ports":{"prox
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}]
[user:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:terminate:348] Shutting down bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on 'ns_1@127.0.0.1' for deletion
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 2 times in the past 2.44446 secs (last seen 1.086134 secs ago
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:<0.17431.0>:menelaus_web:handle_streaming:943] closing streaming socket
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw"
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 1:04:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/master">>: ok
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.17761.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" died with reason shutdown
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections!
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.18078.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]}
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.17410.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.17745.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.17745.0>:ns_port_server:log:166] moxi<0.17745.0>: EOL on stdin. Exiting
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.18079.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000
[error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.18079.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}]
[menelaus:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.17726.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\"\n") because it's been seen 2 times in the past 2.367569 secs (last seen 1.283425 secs ago
[menelaus:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.17746.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" of type: membase
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\" of type: membase\n") because it's been seen 3 times in the past 3.656225 secs (last seen 1.283021 secs ago
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.18091.0>:ns_janitor:wait_for_memcached:278] Waiting for "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,3}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}]
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091}
[ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file:
"/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {single_bucket_sup, start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.18096.0>}, {name, {per_bucket_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {single_bucket_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.18093.0>}, {name, {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [],13211,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", "url=http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", "-Z", "port_listen=13211,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [{num_replicas,3}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,13211}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:ensure_bucket:700] Created bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with config string 
"ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18099.0>}, {name, {ns_memcached,stats, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18111.0>}, {name, {ns_memcached,data, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached,start_link, [{"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18112.0>}, {name, {ns_vbm_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18113.0>}, {name, {ns_vbm_new_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_vbm_new_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18114.0>}, {name, {couch_stats_reader, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {couch_stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18115.0>}, {name, {stats_collector, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_collector,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, 
{shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18117.0>}, {name, {stats_archiver, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_archiver,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18119.0>}, {name, {stats_reader, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {stats_reader,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18120.0>}, {name, {failover_safeness_level, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {failover_safeness_level,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18098.0>}, {name, {ns_memcached_sup, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {ns_memcached_sup,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18121.0>}, {name, {capi_ddoc_replication_srv, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_ddoc_replication_srv,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 0 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 1 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 2 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 3 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 4 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 5 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 6 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 7 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 8 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 9 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 10 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 11 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 12 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 13 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 14 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 15 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 16 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 17 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 18 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 19 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 20 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 21 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 22 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 23 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 24 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 25 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 26 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 27 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 28 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 29 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 30 in 
<<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 31 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 32 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 33 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 34 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 35 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 36 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 37 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 38 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 39 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 40 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 41 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 42 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 43 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 44 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 45 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 46 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 47 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 48 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 49 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 50 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 51 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 52 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 53 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 54 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 55 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 56 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 57 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 58 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 59 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 60 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 61 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 62 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 63 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 64 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 65 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 66 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 67 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 68 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 69 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 70 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 71 in 
<<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 72 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 73 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 74 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 75 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 76 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 77 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 78 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 79 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 80 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 81 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 82 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 83 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 84 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 85 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 86 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 87 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 88 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 89 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 90 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 91 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 92 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 93 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 94 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 95 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 96 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 97 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 98 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 99 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 100 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 101 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 102 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 103 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 104 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 105 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 106 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 107 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 108 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 109 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 110 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 111 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 112 in 
<<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 113 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 114 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 115 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 116 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 117 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 118 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 119 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 120 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 121 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 122 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 123 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 124 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 125 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 126 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 127 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 128 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 129 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 130 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 131 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 132 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 133 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 134 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 135 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 136 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 137 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 138 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 139 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 140 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 141 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 142 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 143 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 144 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 145 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 146 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 147 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 148 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 149 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 150 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 151 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 152 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 153 
in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 154 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 155 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 156 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 157 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 158 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 159 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 160 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 161 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 162 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 163 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 164 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 165 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 166 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 167 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 168 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 169 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 170 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 171 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 172 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 173 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 174 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 175 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 176 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 177 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 178 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 179 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 180 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 181 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 182 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 183 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 184 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 185 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 186 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 187 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 188 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 189 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 190 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 191 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 192 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 193 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 194 
in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 195 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 196 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 197 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 198 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 199 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 200 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 201 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 202 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 203 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 204 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 205 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 206 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 207 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 208 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 209 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 210 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 211 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 212 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 213 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 214 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 215 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 216 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 217 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 218 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 219 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 220 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 221 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 222 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 223 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 224 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 225 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 226 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 227 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 228 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 229 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 230 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 231 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 232 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 233 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 234 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 235 
in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 236 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 237 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 238 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 239 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 240 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 241 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 242 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 243 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 244 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 245 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 246 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 247 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 248 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] 
[ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 249 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 250 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 251 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 252 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 253 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 254 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':couch_log:error:42] MC daemon: Error opening vb 255 in <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276">>: {not_found, no_db_file} [views:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:445] Applying map to bucket new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'capi_set_view_manager-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':capi_set_view_manager:apply_map:450] Classified vbuckets for new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276'} started: [{pid,<0.18135.0>}, {name, {capi_set_view_manager, "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}}, {mfargs, {capi_set_view_manager,start_link, ["new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.18080.0>:ns_port_server:log:166] moxi<0.18080.0>: 2012-03-26 01:04:16: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.18080.0>: 2012-03-26 01:04:16: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" with reason shutdown [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, 
"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276"}, <0.18096.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.18096.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.18097.0> [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276", [], 13211, 8091} [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.18093.0>:ns_port_server:log:166] moxi<0.18093.0>: 2012-03-26 01:04:16: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (47) moxi<0.18093.0>: 2012-03-26 01:04:18: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":13211,"replicaIndex":true,"uri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","streamingUri":"/pools/default/bucketsStreaming/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","flushCacheUri":"/pools/default/buckets/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-rel-enterprise","os":"x86_64-unknown-linux-gnu","ports":{"prox [user:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_memcached:terminate:348] Shutting down bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 3 times in the past 3.724506 secs (last seen 1.280046 secs ago [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:<0.17747.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:16] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load 
mutation log, falling back to key dump memcached<0.366.0>: metadata loaded in 427 usec memcached<0.366.0>: warmup completed in 572 usec [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'ns_memcached-new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276':ns_storage_conf:delete_database:395] Deleting database <<"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276/master">>: ok [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18096.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" died with reason shutdown [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18411.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18080.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.17037.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18080.0>:ns_port_server:log:166] moxi<0.18080.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18412.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [menelaus:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.17749.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276" [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"new-bucket-8da9a67c-af52-433d-b0c1-b59abf915276\"\n") because it's been seen 3 times in the past 3.650895 secs (last seen 1.283326 secs ago [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.18412.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:warn] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18110.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:14.976460 : test_default_moxi started [menelaus:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18398.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 6 times in the past 73.132691 secs (last seen 6.351895 secs ago [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18429.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:17] 
[ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.18434.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.17750.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18438.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18448.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18450.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18451.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, 
{child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18452.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18453.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18455.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18457.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.18458.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.18437.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.18459.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.18434.0>, supervisor, [single_bucket_sup]} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18413.0>:ns_port_server:log:166] moxi<0.18413.0>: 2012-03-26 01:04:17: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.18413.0>: 2012-03-26 01:04:17: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) moxi<0.18413.0>: 2012-03-26 01:04:19: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18413.0>: "name": "default", moxi<0.18413.0>: "nodeLocator": "vbucket", moxi<0.18413.0>: "saslPassword": "", moxi<0.18413.0>: "nodes": [{ moxi<0.18413.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18413.0>: "replication": 0, moxi<0.18413.0>: "clusterMembership": "active", moxi<0.18413.0>: "status": "warmup", moxi<0.18413.0>: "thisNode": true, moxi<0.18413.0>: "hostname": "127.0.0.1:8091", moxi<0.18413.0>: "clusterCompatibility": 1, moxi<0.18413.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18413.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18413.0>: "ports": { moxi<0.18413.0>: "proxy": 11211, moxi<0.18413.0>: "direct": 11210 moxi<0.18413.0>: } moxi<0.18413.0>: }], moxi<0.18413.0>: "vBucketServerMap": { moxi<0.18413.0>: "hashAlgorithm": "CRC", moxi<0.18413.0>: "numReplicas": 1, moxi<0.18413.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18413.0>: "vBucketMap": [] moxi<0.18413.0>: } moxi<0.18413.0>: }) [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [couchdb:info] [2012-03-26 1:04:17] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found, no_db_file}
[... vbuckets 48 through 245 each log the identical "[couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb N in <<"default">>: {not_found, no_db_file}" entry at 1:04:17 ...]
[couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb
246 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found, no_db_file} [views:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [couchdb:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found, no_db_file} [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:<0.18434.0>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.18435.0> [error_logger:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.18476.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [user:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-03-26 1:04:17] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log ns_memcached:2("Shutting down bucket \"default\" on 'ns_1@127.0.0.1' for deletion") because it's been seen 6 times in the past 61.653428 secs (last seen 6.262597 secs ago [ns_server:info] [2012-03-26 1:04:18] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine 
memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: metadata loaded in 187 usec memcached<0.366.0>: warmup completed in 403 usec [ns_server:info] [2012-03-26 1:04:18] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok [ns_server:info] [2012-03-26 1:04:18] [ns_1@127.0.0.1:<0.18434.0>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18750.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18082.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18413.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18413.0>:ns_port_server:log:166] moxi<0.18413.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18751.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [menelaus:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.17752.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.18751.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 6 times in the past 61.690661 secs (last seen 6.349373 secs ago [menelaus:warn] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18065.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:16.217940 : test_default_moxi finished [menelaus:warn] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18415.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:16.234558 : test_default_case_sensitive_dedicated started [menelaus:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18418.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "Default" of type: membase [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18764.0>:ns_janitor:wait_for_memcached:278] Waiting for "Default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"Default", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,"test_non_default"}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:19] 
[ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "Default"}, {single_bucket_sup, start_link, ["Default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.18768.0>}, {name,{per_bucket_sup,"Default"}}, {mfargs,{single_bucket_sup,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"Default", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,"test_non_default"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18421.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:'ns_memcached-Default':ns_memcached:ensure_bucket:700] Created bucket "Default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=209715200;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/Default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=Default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18771.0>}, {name,{ns_memcached,stats,"Default"}}, {mfargs,{ns_memcached,start_link,[{"Default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18782.0>}, {name,{ns_memcached,data,"Default"}}, {mfargs,{ns_memcached,start_link,[{"Default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18785.0>}, {name,{ns_vbm_sup,"Default"}}, {mfargs,{ns_vbm_sup,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, 
{child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18786.0>}, {name,{ns_vbm_new_sup,"Default"}}, {mfargs,{ns_vbm_new_sup,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18787.0>}, {name,{couch_stats_reader,"Default"}}, {mfargs,{couch_stats_reader,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18788.0>}, {name,{stats_collector,"Default"}}, {mfargs,{stats_collector,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18790.0>}, {name,{stats_archiver,"Default"}}, {mfargs,{stats_archiver,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_call:109] Creating table 'stats_archiver-Default-minute' [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18792.0>}, {name,{stats_reader,"Default"}}, {mfargs,{stats_reader,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-Default'} started: [{pid,<0.18795.0>}, {name,{failover_safeness_level,"Default"}}, {mfargs, {failover_safeness_level,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-Default'} started: [{pid,<0.18770.0>}, {name,{ns_memcached_sup,"Default"}}, {mfargs,{ns_memcached_sup,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [user:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:'ns_memcached-Default':ns_memcached:terminate:348] Shutting down bucket "Default" on 'ns_1@127.0.0.1' for server shutdown [error_logger:error] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: cb_generic_replication_srv:init/1 pid: <0.18796.0> registered_name: [] exception exit: {{case_clause,{error,illegal_database_name}}, 
[{capi_ddoc_replication_srv,open_local_db,1}, {cb_generic_replication_srv,init,1}, {gen_server,init_it,6}, {proc_lib,init_p_do_apply,3}]} in function gen_server:init_it/6 ancestors: ['single_bucket_sup-Default',<0.18768.0>] messages: [] links: [<0.18769.0>,<0.18797.0>] dictionary: [] trap_exit: false status: running heap_size: 233 stack_size: 24 reductions: 127 neighbours: [error_logger:error] [2012-03-26 1:04:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'single_bucket_sup-Default'} Context: start_error Reason: {{case_clause,{error,illegal_database_name}}, [{capi_ddoc_replication_srv,open_local_db,1}, {cb_generic_replication_srv,init,1}, {gen_server,init_it,6}, {proc_lib,init_p_do_apply,3}]} Offender: [{pid,undefined}, {name,{capi_ddoc_replication_srv,"Default"}}, {mfargs,{capi_ddoc_replication_srv,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [stats:error] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18422.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "Default" with reason shutdown [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-Default-minute', [{name,'stats_archiver-Default-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749059,98851},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,358,<0.18794.0>}} [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:mb_mnesia:mb_mnesia:handle_info:215] Mnesia table event: {write,{schema,'stats_archiver-Default-minute', [{name,'stats_archiver-Default-minute'}, {type,ordered_set}, {ram_copies,[]}, {disc_copies,['ns_1@127.0.0.1']}, {disc_only_copies,[]}, {load_order,0}, {access_mode,read_write}, {majority,false}, {index,[]}, {snmp,[]}, {local_content,true}, {record_name,stat_entry}, {attributes,[timestamp,values]}, {user_properties,[]}, {frag_properties,[]}, {cookie,{{1332,749059,98851},'ns_1@127.0.0.1'}}, {version,{{2,0},[]}}]}, {tid,358,<0.18794.0>}} [stats:error] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18423.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "Default"}, <0.18768.0>, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:19] 
[ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:<0.18752.0>:ns_port_server:log:166] moxi<0.18752.0>: 2012-03-26 01:04:19: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.18752.0>: 2012-03-26 01:04:19: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) moxi<0.18752.0>: 2012-03-26 01:04:21: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18752.0>: "name": "Default", moxi<0.18752.0>: "nodeLocator": "vbucket", moxi<0.18752.0>: "saslPassword": "test_non_default", moxi<0.18752.0>: "nodes": [{ moxi<0.18752.0>: "couchApiBase": "http://127.0.0.1:8092/Default", moxi<0.18752.0>: "replication": 0, moxi<0.18752.0>: "clusterMembership": "active", moxi<0.18752.0>: "status": "warmup", moxi<0.18752.0>: "thisNode": true, moxi<0.18752.0>: "hostname": "127.0.0.1:8091", moxi<0.18752.0>: "clusterCompatibility": 1, moxi<0.18752.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18752.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18752.0>: "ports": { moxi<0.18752.0>: "proxy": 11211, moxi<0.18752.0>: "direct": 11210 moxi<0.18752.0>: } moxi<0.18752.0>: }], moxi<0.18752.0>: "vBucketServerMap": { moxi<0.18752.0>: "hashAlgorithm": "CRC", moxi<0.18752.0>: "numReplicas": 1, moxi<0.18752.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18752.0>: "vBucketMap": [] moxi<0.18752.0>: } moxi<0.18752.0>: }) [ns_server:info] [2012-03-26 1:04:19] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Warning: Data diretory does not exist, /opt/couchbase/var/lib/couchdb/Default memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/Default memcached<0.366.0>: metadata loaded in 169 usec memcached<0.366.0>: Warning: Data diretory is empty, /opt/couchbase/var/lib/couchdb/Default memcached<0.366.0>: warmup completed in 326 usec memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine [ns_server:info] [2012-03-26 1:04:20] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! [ns_server:warn] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "Default" within expected time. 
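The CRASH REPORT and SUPERVISOR REPORT above show capi_ddoc_replication_srv:open_local_db/1 failing with {error,illegal_database_name} while the per-bucket supervisor for "Default" starts, followed by the janitor exit and the "failed to delete bucket" warning. Apache CouchDB's documented naming rule only accepts database names that begin with a lowercase letter and otherwise contain lowercase letters, digits and the characters _ $ ( ) + - /, so a per-bucket database derived from "Default" (capital D) is rejected while the earlier "default/master" database was deleted without trouble. The Python sketch below is only an illustration of that rule under the assumption that the CAPI layer applies the same check to "<bucket>/master"-style names; it is not taken from Couchbase source code.

import re

# CouchDB's documented database-name rule: start with a lowercase letter,
# then only a-z, 0-9 and the characters _ $ ( ) + - / are allowed.
# Assumption: the CAPI layer applies this same rule to per-bucket databases
# such as "default/master" (a name that does appear in this log).
DB_NAME_RE = re.compile(r"^[a-z][a-z0-9_$()+/-]*$")

def is_legal_db_name(name: str) -> bool:
    """Return True if name satisfies the CouchDB naming rule."""
    return DB_NAME_RE.match(name) is not None

for name in ("default/master", "Default/master"):
    verdict = "ok" if is_legal_db_name(name) else "illegal_database_name"
    print(name, "->", verdict)
# default/master -> ok
# Default/master -> illegal_database_name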
[ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18424.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "Default" [menelaus:warn] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18754.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:21.307852 : test_default_case_sensitive_dedicated finished [menelaus:warn] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18756.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:21.322927 : test_default_on_non_default_port started [menelaus:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18758.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 7 times in the past 79.479309 secs (last seen 6.346618 secs ago [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18834.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18759.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18760.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] 
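moxi's repeated complaint in the surrounding entries, "bad JSON configuration ... Number of buckets must be a power of two > 0 and <= MAX_BUCKETS" (attributed to agent_config.c), fires because the freshly created bucket is still warming up and advertises "vBucketMap": [], i.e. zero vBuckets, which is not a positive power of two. The check below is a minimal Python illustration of the constraint the message states, not moxi's actual C implementation, and the max_buckets default is a placeholder assumption rather than the value compiled into moxi/libvbucket.

def is_valid_vbucket_count(n: int, max_buckets: int = 65536) -> bool:
    """Constraint stated by moxi's error message: the vBucket count must be
    a power of two, greater than 0 and no larger than MAX_BUCKETS.
    The max_buckets default here is an assumed placeholder value."""
    return 0 < n <= max_buckets and (n & (n - 1)) == 0

print(is_valid_vbucket_count(0))    # False: empty vBucketMap during warmup, so the config is rejected
print(is_valid_vbucket_count(256))  # True: the 256 vBuckets configured for these buckets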
[ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18856.0>:ns_port_sup:restart_port:134] restarting port: {moxi, "/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p", "0", "-Y", "y", "-O", "stderr", []], [{env, [{"EVENT_NOSELECT", "1"}, {"MOXI_SASL_PLAIN_USR", "Administrator"}, {"MOXI_SASL_PLAIN_PWD", "password"}]}, use_stdio, exit_status, port_server_send_eol, stderr_to_stdout, stream]} [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18063.0>:menelaus_web:handle_streaming:950] menelaus_web streaming socket closed by client [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18752.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18752.0>:ns_port_server:log:166] moxi<0.18752.0>: 2012-03-26 01:04:26: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18752.0>: "name": "default", moxi<0.18752.0>: "nodeLocator": "vbucket", moxi<0.18752.0>: "saslPassword": "", moxi<0.18752.0>: "nodes": [{ moxi<0.18752.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18752.0>: "replication": 0, moxi<0.18752.0>: "clusterMembership": "active", moxi<0.18752.0>: "status": "warmup", moxi<0.18752.0>: "thisNode": true, moxi<0.18752.0>: "hostname": "127.0.0.1:8091", moxi<0.18752.0>: "clusterCompatibility": 1, moxi<0.18752.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18752.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18752.0>: "ports": { moxi<0.18752.0>: "proxy": 11211, moxi<0.18752.0>: "direct": 11210 moxi<0.18752.0>: } moxi<0.18752.0>: }], moxi<0.18752.0>: "vBucketServerMap": { moxi<0.18752.0>: "hashAlgorithm": "CRC", moxi<0.18752.0>: "numReplicas": 1, moxi<0.18752.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18752.0>: "vBucketMap": [] moxi<0.18752.0>: } moxi<0.18752.0>: }) moxi<0.18752.0>: EOL on stdin. 
Exiting [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18857.0>:supervisor_cushion:init:43] starting ns_port_server with delay of 5000 [error_logger:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.18857.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [menelaus:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18773.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 7 times in the past 66.951789 secs (last seen 5.261128 secs ago [menelaus:warn] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18807.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:21.477373 : test_default_on_non_default_port finished [menelaus:warn] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18433.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:21.493706 : test_non_default_moxi started [menelaus:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18487.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "test_non_default" of type: membase [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"test_non_default", [], 11611, 8091} [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"test_non_default", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,11611}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18869.0>:ns_janitor:wait_for_memcached:278] Waiting for "test_non_default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] 
Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [error_logger:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.18871.0>}, {name,{"test_non_default",[],11611,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", "url=http://127.0.0.1:8091/pools/default/bucketsStreaming/test_non_default", "-Z", "port_listen=11611,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "test_non_default"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"test_non_default", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,11611}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18825.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18827.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "test_non_default" with reason shutdown [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"test_non_default", [], 11611, 8091} [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18871.0>:ns_port_server:log:166] moxi<0.18871.0>: 2012-03-26 01:04:24: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (16) moxi<0.18871.0>: 2012-03-26 01:04:26: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/test_non_default: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS 
({"name":"test_non_default","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":11611,"replicaIndex":true,"uri":"/pools/default/buckets/test_non_default","streamingUri":"/pools/default/bucketsStreaming/test_non_default","flushCacheUri":"/pools/default/buckets/test_non_default/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/test_non_default","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-rel-enterprise","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/test_non_default/stats","directoryURI":"/pools/default/buckets/test_non_default/statsDirectory","nodeStatsListURI":"/poo [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18826.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:24] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:04:24: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.18858.0>: 2012-03-26 01:04:24: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [ns_server:warn] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "test_non_default" within expected time. 
[ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18828.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "test_non_default" [menelaus:warn] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18830.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:26.540987 : test_non_default_moxi finished [menelaus:warn] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18844.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:26.558687 : test_default_case_sensitive_different_ports started [menelaus:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18860.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 8 times in the past 84.715599 secs (last seen 5.23629 secs ago [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18909.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18859.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18861.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [stats:error] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18862.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
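
The "suppressing duplicate log ... seen N times in the past S secs" entries above show ns_log collapsing repeated messages by count and time window. A rough, assumption-laden illustration of that style of suppression follows; it is not ns_log's actual implementation, and the window length is an arbitrary placeholder:

import time

class DupSuppressor(object):
    # Rough illustration of count-plus-window duplicate suppression; the window
    # length and bookkeeping here are assumptions, not taken from ns_log.
    def __init__(self, window_secs=300):
        self.window = window_secs
        self.seen = {}  # message key -> (count, first_seen)

    def should_log(self, key, now=None):
        now = time.time() if now is None else now
        count, first = self.seen.get(key, (0, now))
        if count and now - first <= self.window:
            self.seen[key] = (count + 1, first)
            return False   # suppressed, like the "seen 7 times in the past ... secs" entries above
        self.seen[key] = (1, now)
        return True
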
[ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:29] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:04:31: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [ns_server:warn] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
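
The recurring agent_config.c error above rejects the streaming config because "vBucketMap" is still [] while the bucket warms up; the rule moxi states is that the vBucket count must be a power of two, greater than zero, and at most MAX_BUCKETS. A minimal sketch of that rule (the MAX_BUCKETS value below is a placeholder assumption, not the constant from the moxi source):

MAX_BUCKETS = 65536   # placeholder assumption; the real limit lives in the moxi/libvbucket source

def valid_vbucket_count(vbucket_map):
    # Power-of-two check matching the error text above:
    # "Number of buckets must be a power of two > 0 and <= MAX_BUCKETS".
    n = len(vbucket_map)
    return 0 < n <= MAX_BUCKETS and (n & (n - 1)) == 0

print(valid_vbucket_count([]))               # False: the warming-up config above has "vBucketMap": []
print(valid_vbucket_count([[0, -1]] * 256))  # True: 256 vBuckets, matching num_vbuckets in the bucket config
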
[ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18863.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 8 times in the past 77.095038 secs (last seen 10.143249 secs ago [menelaus:warn] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18901.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:31.622372 : test_default_case_sensitive_different_ports finished [menelaus:warn] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18903.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:31.638712 : test_non_default_case_sensitive_different_port started [menelaus:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18905.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d" of type: membase [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18952.0>:ns_janitor:wait_for_memcached:278] Waiting for "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,11711}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d", [], 11711, 8091} [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,11711}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.18954.0>}, {name, {"uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d", [],11711,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", "url=http://127.0.0.1:8091/pools/default/bucketsStreaming/uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d", "-Z", 
"port_listen=11711,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18919.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18921.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d" with reason shutdown [stats:error] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18923.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d", [], 11711, 8091} [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18954.0>:ns_port_server:log:166] moxi<0.18954.0>: 2012-03-26 01:04:34: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (46) moxi<0.18954.0>: 2012-03-26 01:04:36: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":11711,"replicaIndex":true,"uri":"/pools/default/buckets/uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d","streamingUri":"/pools/default/bucketsStreaming/uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d","flushCacheUri":"/pools/default/buckets/uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-rel-enterprise","os":"x86_64-unknown-linux-gnu","ports":{"proxy":112 [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:<0.18911.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:34] 
[ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d" within expected time. [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18864.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d" [menelaus:warn] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18875.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:36.712836 : test_non_default_case_sensitive_different_port finished [menelaus:warn] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18881.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:36.730182 : test_non_default_case_sensitive_same_port started [menelaus:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18943.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1" of type: membase [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18996.0>:ns_janitor:wait_for_memcached:278] Waiting for "uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:148] Starting moxi: {"uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1", [], 11311, 8091} [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,11311}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,none}, {moxi_port,11311}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [error_logger:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_moxi_sup} started: [{pid,<0.18998.0>}, {name, {"uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1", [],11311,8091}}, {mfargs, {ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-B","auto","-z", 
"url=http://127.0.0.1:8091/pools/default/bucketsStreaming/uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1", "-Z", "port_listen=11311,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-p","0","-Y","y","-O","stderr"], [use_stdio,stderr_to_stdout, {env, [{"MOXI_SASL_PLAIN_USR", "uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1"}, {"MOXI_SASL_PLAIN_PWD",[]}]}]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [stats:error] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18944.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18946.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18947.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1" with reason shutdown [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_moxi_sup_work_queue:ns_moxi_sup:do_notify:138] Killing unwanted moxi: {"uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1", [], 11311, 8091} [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18998.0>:ns_port_server:log:166] moxi<0.18998.0>: 2012-03-26 01:04:39: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (46) moxi<0.18998.0>: 2012-03-26 01:04:41: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/bucketsStreaming/uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({"name":"uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1","bucketType":"membase","authType":"none","saslPassword":"","proxyPort":11311,"replicaIndex":true,"uri":"/pools/default/buckets/uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1","streamingUri":"/pools/default/bucketsStreaming/uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1","flushCacheUri":"/pools/default/buckets/uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1/controller/doFlush","nodes":[{"couchApiBase":"http://127.0.0.1:8092/uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1","replication":0.0,"clusterMembership":"active","status":"warmup","thisNode":true,"hostname":"127.0.0.1:8091","clusterCompatibility":1,"version":"2.0.0r-944-rel-enterprise","os":"x86_64-unknown-linux-gnu","ports":{"proxy":112 [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:<0.18945.0>:menelaus_web:handle_streaming:943] closing streaming socket [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] 
[2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1" within expected time. [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.18948.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "uppercase_0574f3d5-6f2b-4124-93be-a218fa5ad7d1" [menelaus:warn] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.18988.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:41.797922 : test_non_default_case_sensitive_same_port finished [menelaus:warn] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.18990.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:41.816644 : test_less_than_minimum_memory_quota started [menelaus:warn] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.19012.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:41.862528 : test_less_than_minimum_memory_quota finished [menelaus:warn] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.18961.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:41.882333 : test_max_memory_quota started [menelaus:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.19031.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b" of type: membase [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.19051.0>:ns_janitor:wait_for_memcached:278] Waiting for "maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b", [{num_replicas,1}, {replica_index,true}, {ram_quota,3231711232}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.19032.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b", [{num_replicas,1}, 
{replica_index,true}, {ram_quota,3231711232}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.19033.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b" with reason shutdown [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:44] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:04:46: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [ns_server:warn] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b" within expected time. 
[ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19034.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "maxquota_3487d343-7aa8-42fc-b87f-48fde83bf81b" [menelaus:warn] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19036.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:46.946723 : test_max_memory_quota finished [menelaus:warn] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19038.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:46.963728 : test_negative_replica started [menelaus:warn] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19043.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:46.987689 : test_negative_replica finished [menelaus:warn] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19045.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:47.010309 : test_zero_replica started [menelaus:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19054.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "replica_6230f71c-ecf1-4726-a467-4a5f639d9418" of type: membase [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19102.0>:ns_janitor:wait_for_memcached:278] Waiting for "replica_6230f71c-ecf1-4726-a467-4a5f639d9418" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"replica_6230f71c-ecf1-4726-a467-4a5f639d9418", [{num_replicas,0}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"replica_6230f71c-ecf1-4726-a467-4a5f639d9418", [{num_replicas,0}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.19061.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:04:49] 
[ns_1@127.0.0.1:<0.19063.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "replica_6230f71c-ecf1-4726-a467-4a5f639d9418" with reason shutdown [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:49] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:50] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:04:51: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "replica_6230f71c-ecf1-4726-a467-4a5f639d9418", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/replica_6230f71c-ecf1-4726-a467-4a5f639d9418", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 0, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [ns_server:warn] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "replica_6230f71c-ecf1-4726-a467-4a5f639d9418" within expected time. 
[ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.19081.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "replica_6230f71c-ecf1-4726-a467-4a5f639d9418" [menelaus:warn] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.19085.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:52.061010 : test_zero_replica finished [menelaus:warn] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.19087.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:52.079467 : test_one_replica started [menelaus:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.19090.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "replica_233ef9bf-7182-4f82-a061-fe4d447bdffd" of type: membase [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.19143.0>:ns_janitor:wait_for_memcached:278] Waiting for "replica_233ef9bf-7182-4f82-a061-fe4d447bdffd" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"replica_233ef9bf-7182-4f82-a061-fe4d447bdffd", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"replica_233ef9bf-7182-4f82-a061-fe4d447bdffd", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [stats:error] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.19093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.19094.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "replica_233ef9bf-7182-4f82-a061-fe4d447bdffd" with reason shutdown [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:04:54] 
[ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:04:54] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:04:55] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:04:56: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "replica_233ef9bf-7182-4f82-a061-fe4d447bdffd", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/replica_233ef9bf-7182-4f82-a061-fe4d447bdffd", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [ns_doctor:info] [2012-03-26 1:04:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749099,305419}}, {outgoing_replications_safeness_level,[]}, {incoming_replications_conf_hashes,[]}, {replication,[]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,36536080}, {processes,10029136}, {processes_used,8406576}, {system,26506944}, {atom,1305873}, {atom_used,1282827}, {binary,297144}, {code,12859877}, {ets,1649808}]}, {system_stats, [{cpu_utilization_rate,24.87437185929648}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,121}, {memory_data,{4040077312,4006981632,{<0.18805.0>,372136}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 32692 kB\nBuffers: 53704 kB\nCached: 3531004 kB\nSwapCached: 0 kB\nActive: 302424 kB\nInactive: 3441832 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 32692 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 116 kB\nWriteback: 0 kB\nAnonPages: 159544 kB\nMapped: 24856 kB\nSlab: 134804 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 
kB\nCommitted_AS: 577628 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615748096}, {buffered_memory,54992896}, {free_memory,33476608}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{116042,0}}, {context_switches,{421989,0}}, {garbage_collection,{201072,180765790,0}}, {io,{{input,15663563},{output,11158593}}}, {reductions,{102556763,985501}}, {run_queue,0}, {runtime,{14460,260}}]}]}] [ns_server:warn] [2012-03-26 1:04:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "replica_233ef9bf-7182-4f82-a061-fe4d447bdffd" within expected time. [ns_server:info] [2012-03-26 1:04:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:04:59] [ns_1@127.0.0.1:<0.19095.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "replica_233ef9bf-7182-4f82-a061-fe4d447bdffd" [menelaus:warn] [2012-03-26 1:04:59] [ns_1@127.0.0.1:<0.19134.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:57.128972 : test_one_replica finished [menelaus:warn] [2012-03-26 1:04:59] [ns_1@127.0.0.1:<0.19136.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:07:57.154705 : test_two_replica started [menelaus:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:<0.19139.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32" of type: membase [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:<0.19186.0>:ns_janitor:wait_for_memcached:278] Waiting for "2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32", [{num_replicas,2}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32", [{num_replicas,2}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:05:00] [ns_1@127.0.0.1:<0.19147.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:00] [ns_1@127.0.0.1:<0.19153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32" with reason shutdown [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:00] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:05:01: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 2, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [ns_server:warn] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32" within expected time. 
[ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.19155.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "2replica_3f02a3c1-c872-48c2-ae29-9732ab639a32" [menelaus:warn] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.19097.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:02.209981 : test_two_replica finished [menelaus:warn] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.19107.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:02.228242 : test_three_replica started [menelaus:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.19176.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c" of type: membase [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.19225.0>:ns_janitor:wait_for_memcached:278] Waiting for "3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c", [{num_replicas,3}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c", [{num_replicas,3}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [stats:error] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.19177.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.19178.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c" with reason shutdown [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:05:05] 
[ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:05] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:05:06: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 3, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [ns_server:warn] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c" within expected time. 
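Each of these create/delete cycles starts with a menelaus_web_buckets:do_bucket_create entry driven over REST by the Python client visible in the Client-side error-reports. A rough Erlang/inets sketch of an equivalent request is below; the form parameter names (ramQuotaMB, replicaNumber, authType, saslPassword, bucketType) and the Administrator:password credentials are assumptions about the 2.0-era REST API rather than anything taken from this log, while the bucket name and quota come from the entries above.

%% Hypothetical sketch of a bucket-create request like the ones behind the
%% "Created bucket ... of type: membase" entries. Parameter names and
%% credentials are assumptions; adjust to the actual deployment.
-module(create_bucket_sketch).
-export([create/2]).

create(Name, NumReplicas) ->
    inets:start(),   %% returns ok or {error,{already_started,inets}}; either is fine here
    Url  = "http://127.0.0.1:8091/pools/default/buckets",
    Auth = "Basic " ++ base64:encode_to_string("Administrator:password"),
    Body = "name=" ++ Name
           ++ "&bucketType=membase&ramQuotaMB=200"   %% 200 MB, i.e. the 209715200-byte ram_quota above
           ++ "&replicaNumber=" ++ integer_to_list(NumReplicas)
           ++ "&authType=sasl&saslPassword=",        %% empty password, as in {sasl_password,[]}
    httpc:request(post,
                  {Url, [{"Authorization", Auth}],
                   "application/x-www-form-urlencoded", Body},
                  [], []).

Something like create("3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c", 3) would reproduce the num_replicas,3 / ram_quota,209715200 configuration that appears in the config change entries above.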
[ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19179.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "3replica_c797939c-36a5-4419-ba51-b6dfa61acf3c" [menelaus:warn] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19216.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:07.277616 : test_three_replica finished [menelaus:warn] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19218.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:07.295557 : test_four_replica started [menelaus:warn] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19235.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:07.321109 : test_four_replica finished [menelaus:warn] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19180.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:07.340865 : test_valid_chars started [menelaus:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19191.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%" of type: membase [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19277.0>:ns_janitor:wait_for_memcached:278] Waiting for "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%", [{num_replicas,1}, {replica_index,true}, {ram_quota,209715200}, {auth_type,sasl}, {sasl_password,[]}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [stats:error] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19196.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:10] 
[ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.19198.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%" with reason shutdown [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:10] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:05:12: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%25", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [ns_server:warn] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%" within expected time. 
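The bucket name used by test_valid_chars above enumerates every character the test expects to be accepted (ASCII letters, digits, underscore, dash, dot and percent), and test_invalid_chars runs right after it in the entries below. A small Erlang sketch of that kind of name validation follows; the regular expression and valid_bucket_name/1 are illustrative guesses at the rule the test exercises, not menelaus' actual implementation.

%% Hypothetical bucket-name check matching the character set in the
%% test_valid_chars bucket name above; not ns_server code.
-module(bucket_name_check).
-export([valid_bucket_name/1]).

valid_bucket_name(Name) when is_list(Name) ->
    case re:run(Name, "^[0-9A-Za-z_.%-]+$", [{capture, none}]) of
        match   -> true;
        nomatch -> false
    end.

valid_bucket_name("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%") returns true, while a name containing, say, a space or a slash returns false, which is the distinction the valid/invalid-chars test pair checks over REST.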
[ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19260.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%" [menelaus:warn] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19262.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:12.399391 : test_valid_chars finished [menelaus:warn] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19264.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:12.427154 : test_invalid_chars started [menelaus:warn] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19353.0>:menelaus_web:loop:290] Client-side error-report for user "Administrator" on node 'ns_1@127.0.0.1': User-Agent:Python-httplib2/$Rev: 259 $ 2012-03-26 08:08:12.796522 : test_invalid_chars finished [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: memory_quota -> 3082 [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: memory_quota -> 3082 [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [menelaus:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19366.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 9 times in the past 131.036008 secs (last seen 46.320409 secs ago [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19394.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] 
[ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19367.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19368.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [menelaus:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19368.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "set-get-bucket-replica-1" of type: membase [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19409.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-1", [{num_replicas,1}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}, {"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19369.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19369.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-1", [{num_replicas,1}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:05:15] 
[ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason shutdown [menelaus:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19370.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "set-get-bucket-replica-2" of type: membase [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19425.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-2", [{num_replicas,2}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}, {"set-get-bucket-replica-1", [{num_replicas,1}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-2", [{num_replicas,2}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"set-get-bucket-replica-1", [{num_replicas,1}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19371.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] 
[ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19371.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19371.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19372.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19372.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19372.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason shutdown [menelaus:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19372.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "set-get-bucket-replica-3" of type: membase [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19444.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-3", [{num_replicas,3}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}, {"set-get-bucket-replica-2", [{num_replicas,2}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"set-get-bucket-replica-1", [{num_replicas,1}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-3", [{num_replicas,3}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"set-get-bucket-replica-2", [{num_replicas,2}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"set-get-bucket-replica-1", [{num_replicas,1}, {replica_index,true}, 
{ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"default", [{sasl_password,[]}, {num_replicas,0}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.19378.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:05:17: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 0, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) moxi<0.18858.0>: 2012-03-26 01:05:17: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 
11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 0, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) moxi<0.18858.0>: 2012-03-26 01:05:17: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "set-get-bucket-replica-1", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "password", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/set-get-bucket-replica-1", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", [ns_server:warn] [2012-03-26 1:05:15] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:171] Dropped 200 log lines from moxi [stats:error] [2012-03-26 1:05:16] [ns_1@127.0.0.1:<0.19381.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:16] [ns_1@127.0.0.1:<0.19444.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:16] [ns_1@127.0.0.1:<0.19382.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:17] [ns_1@127.0.0.1:<0.19383.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:17] [ns_1@127.0.0.1:<0.19444.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:17] [ns_1@127.0.0.1:<0.19461.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:18] [ns_1@127.0.0.1:<0.19464.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:18] [ns_1@127.0.0.1:<0.19444.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:18] [ns_1@127.0.0.1:<0.19466.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:19] [ns_1@127.0.0.1:<0.19384.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-3"], <0.19444.0>} [ns_server:info] [2012-03-26 1:05:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:05:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19444.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 
ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1985 neighbours: [error_logger:error] [2012-03-26 1:05:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.357.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19487.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:19] [ns_1@127.0.0.1:<0.19474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:20] [ns_1@127.0.0.1:<0.19477.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:20] [ns_1@127.0.0.1:<0.19481.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:21] [ns_1@127.0.0.1:<0.19385.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:21] [ns_1@127.0.0.1:<0.19490.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:22] [ns_1@127.0.0.1:<0.19493.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:22] [ns_1@127.0.0.1:<0.19495.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:23] [ns_1@127.0.0.1:<0.19388.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:23] [ns_1@127.0.0.1:<0.19501.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:24] [ns_1@127.0.0.1:<0.19503.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19487.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19518.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:24] [ns_1@127.0.0.1:<0.19505.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:25] [ns_1@127.0.0.1:<0.19389.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:25] [ns_1@127.0.0.1:<0.19515.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:26] [ns_1@127.0.0.1:<0.19519.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:26] [ns_1@127.0.0.1:<0.19521.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:27] [ns_1@127.0.0.1:<0.19390.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:27] [ns_1@127.0.0.1:<0.19527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:28] [ns_1@127.0.0.1:<0.19530.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:28] [ns_1@127.0.0.1:<0.19532.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:29] [ns_1@127.0.0.1:<0.19397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:29] [ns_1@127.0.0.1:<0.19548.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19518.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19552.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:30] [ns_1@127.0.0.1:<0.19538.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:30] [ns_1@127.0.0.1:<0.19540.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:30] [ns_1@127.0.0.1:<0.19548.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:31] [ns_1@127.0.0.1:<0.19544.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:31] [ns_1@127.0.0.1:<0.19404.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:31] [ns_1@127.0.0.1:<0.19548.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:32] [ns_1@127.0.0.1:<0.19556.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:32] [ns_1@127.0.0.1:<0.19559.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
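Every crash and supervisor report in this stretch has the same shape: a gen_server:call/3 to an 'ns_memcached-<bucket>' process that is not registered (the bucket's memcached-side process has not come up yet, or has already gone away), which makes the caller exit with {noproc, {gen_server, call, ...}}. The janitor run exits and the orchestrator later starts another, while menelaus_sup keeps restarting hot_keys_keeper. A minimal sketch of that exit, and one way a caller can absorb it, is below; safe_call/3 is a hypothetical wrapper, not ns_server code.

%% Sketch of the {noproc, ...} exits in the reports above. safe_call/3 is a
%% hypothetical wrapper; ns_janitor itself lets the exit propagate, which is
%% why the orchestrator logs "Janitor run exited ... with reason {noproc, ...}".
-module(noproc_sketch).
-export([safe_call/3]).

safe_call(ServerRef, Request, Timeout) ->
    try gen_server:call(ServerRef, Request, Timeout) of
        Reply -> {ok, Reply}
    catch
        exit:{noproc, _} ->
            %% e.g. {'ns_memcached-set-get-bucket-replica-3','ns_1@127.0.0.1'}
            %% is not running while the bucket is still being set up.
            {error, not_running};
        exit:{timeout, _} ->
            {error, timeout}
    end.

safe_call({'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000) would return {error, not_running} in the situation logged above instead of taking the calling process down.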
[ns_server:info] [2012-03-26 1:05:32] [ns_1@127.0.0.1:<0.19548.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:33] [ns_1@127.0.0.1:<0.19561.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:33] [ns_1@127.0.0.1:<0.19413.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:05:33] [ns_1@127.0.0.1:<0.19579.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19548.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:05:34] [ns_1@127.0.0.1:<0.19568.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:34] [ns_1@127.0.0.1:<0.19572.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:34] [ns_1@127.0.0.1:<0.19579.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19552.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19590.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:35] [ns_1@127.0.0.1:<0.19574.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:35] [ns_1@127.0.0.1:<0.19420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:35] [ns_1@127.0.0.1:<0.19579.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:05:36] [ns_1@127.0.0.1:<0.19586.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:36] [ns_1@127.0.0.1:<0.19591.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:36] [ns_1@127.0.0.1:<0.19579.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:37] [ns_1@127.0.0.1:<0.19593.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:37] [ns_1@127.0.0.1:<0.19429.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:37] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:05:37] [ns_1@127.0.0.1:<0.19611.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:37] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19579.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:05:38] [ns_1@127.0.0.1:<0.19601.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:38] [ns_1@127.0.0.1:<0.19604.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:38] [ns_1@127.0.0.1:<0.19611.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:39] [ns_1@127.0.0.1:<0.19606.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:39] [ns_1@127.0.0.1:<0.19437.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.19611.0>} [ns_server:info] [2012-03-26 1:05:39] [ns_1@127.0.0.1:<0.19611.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19590.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19631.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:40] [ns_1@127.0.0.1:<0.19616.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:40] [ns_1@127.0.0.1:<0.19620.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:40] [ns_1@127.0.0.1:<0.19611.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:41] [ns_1@127.0.0.1:<0.19625.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:41] [ns_1@127.0.0.1:<0.19450.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:41] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:05:41] [ns_1@127.0.0.1:<0.19644.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19611.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:05:42] [ns_1@127.0.0.1:<0.19634.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:42] [ns_1@127.0.0.1:<0.19637.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:42] [ns_1@127.0.0.1:<0.19644.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:43] [ns_1@127.0.0.1:<0.19639.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:43] [ns_1@127.0.0.1:<0.19459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:43] [ns_1@127.0.0.1:<0.19644.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:44] [ns_1@127.0.0.1:<0.19650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:05:44] [ns_1@127.0.0.1:<0.19653.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:44] [ns_1@127.0.0.1:<0.19644.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19631.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19669.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:45] [ns_1@127.0.0.1:<0.19655.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:45] [ns_1@127.0.0.1:<0.19472.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:45] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:05:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19644.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:05:46] [ns_1@127.0.0.1:<0.19664.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:46] [ns_1@127.0.0.1:<0.19670.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:47] [ns_1@127.0.0.1:<0.19672.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:47] [ns_1@127.0.0.1:<0.19488.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:48] [ns_1@127.0.0.1:<0.19679.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:48] [ns_1@127.0.0.1:<0.19681.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:49] [ns_1@127.0.0.1:<0.19683.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 
1:05:49] [ns_1@127.0.0.1:<0.19698.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:49] [ns_1@127.0.0.1:<0.19499.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19669.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19704.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:50] [ns_1@127.0.0.1:<0.19690.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:50] [ns_1@127.0.0.1:<0.19698.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:50] [ns_1@127.0.0.1:<0.19692.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:51] [ns_1@127.0.0.1:<0.19701.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:51] [ns_1@127.0.0.1:<0.19698.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:51] [ns_1@127.0.0.1:<0.19510.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:52] [ns_1@127.0.0.1:<0.19708.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:52] [ns_1@127.0.0.1:<0.19698.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:52] [ns_1@127.0.0.1:<0.19711.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:53] [ns_1@127.0.0.1:<0.19716.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:05:53] [ns_1@127.0.0.1:<0.19728.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19698.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:05:53] [ns_1@127.0.0.1:<0.19525.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:54] [ns_1@127.0.0.1:<0.19721.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:54] [ns_1@127.0.0.1:<0.19728.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19704.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19741.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:54] [ns_1@127.0.0.1:<0.19723.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:55] [ns_1@127.0.0.1:<0.19732.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:55] [ns_1@127.0.0.1:<0.19728.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:55] [ns_1@127.0.0.1:<0.19536.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:56] [ns_1@127.0.0.1:<0.19742.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:56] [ns_1@127.0.0.1:<0.19728.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:56] [ns_1@127.0.0.1:<0.19744.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:57] [ns_1@127.0.0.1:<0.19749.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:05:57] [ns_1@127.0.0.1:<0.19762.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:05:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= 
crasher: initial call: ns_janitor:cleanup/2 pid: <0.19728.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:05:57] [ns_1@127.0.0.1:<0.19554.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:58] [ns_1@127.0.0.1:<0.19755.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:58] [ns_1@127.0.0.1:<0.19762.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:58] [ns_1@127.0.0.1:<0.19757.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:05:59] [ns_1@127.0.0.1:<0.19765.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:05:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.19762.0>} [ns_server:info] [2012-03-26 1:05:59] [ns_1@127.0.0.1:<0.19762.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:05:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749159,337384}}, {outgoing_replications_safeness_level, [{"set-get-bucket-replica-3",unknown}, {"set-get-bucket-replica-2",unknown}, {"set-get-bucket-replica-1",unknown}, {"default",unknown}]}, {incoming_replications_conf_hashes, [{"set-get-bucket-replica-3",[]}, {"set-get-bucket-replica-2",[]}, {"set-get-bucket-replica-1",[]}, {"default",[]}]}, {replication, [{"set-get-bucket-replica-3",1.0}, {"set-get-bucket-replica-2",1.0}, {"set-get-bucket-replica-1",1.0}, {"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37098000}, {processes,10490656}, {processes_used,8866048}, {system,26607344}, {atom,1306681}, {atom_used,1284164}, {binary,351696}, {code,12859877}, {ets,1675784}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,181}, {memory_data,{4040077312,4007727104,{<0.18805.0>,601392}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 31592 kB\nBuffers: 53984 kB\nCached: 3531184 kB\nSwapCached: 0 kB\nActive: 303756 kB\nInactive: 3442104 
kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 31592 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 160724 kB\nMapped: 24856 kB\nSlab: 134564 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 576948 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615932416}, {buffered_memory,55279616}, {free_memory,32350208}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{176073,0}}, {context_switches,{440721,0}}, {garbage_collection,{210483,203018085,0}}, {io,{{input,15735229},{output,12085536}}}, {reductions,{106595408,715415}}, {run_queue,0}, {runtime,{15470,160}}]}]}] [error_logger:error] [2012-03-26 1:05:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19741.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:05:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19810.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:05:59] [ns_1@127.0.0.1:<0.19566.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:00] [ns_1@127.0.0.1:<0.19770.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:00] [ns_1@127.0.0.1:<0.19762.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:00] [ns_1@127.0.0.1:<0.19774.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:01] [ns_1@127.0.0.1:<0.19811.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:01] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:06:01] [ns_1@127.0.0.1:<0.19823.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19762.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:06:01] [ns_1@127.0.0.1:<0.19582.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:02] [ns_1@127.0.0.1:<0.19816.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:02] [ns_1@127.0.0.1:<0.19823.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:02] [ns_1@127.0.0.1:<0.19818.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:03] [ns_1@127.0.0.1:<0.19826.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:03] [ns_1@127.0.0.1:<0.19823.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:03] [ns_1@127.0.0.1:<0.19599.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:04] [ns_1@127.0.0.1:<0.19832.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:04] [ns_1@127.0.0.1:<0.19823.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19810.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19847.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:04] [ns_1@127.0.0.1:<0.19834.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:05] [ns_1@127.0.0.1:<0.19839.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:06:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19823.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:06:06] [ns_1@127.0.0.1:<0.19614.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:06] [ns_1@127.0.0.1:<0.19848.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:07] [ns_1@127.0.0.1:<0.19850.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:07] [ns_1@127.0.0.1:<0.19856.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:08] [ns_1@127.0.0.1:<0.19632.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:08] [ns_1@127.0.0.1:<0.19860.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:09] [ns_1@127.0.0.1:<0.19862.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:09] [ns_1@127.0.0.1:<0.19866.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:09] [ns_1@127.0.0.1:<0.19882.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19847.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19886.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:10] [ns_1@127.0.0.1:<0.19648.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:10] [ns_1@127.0.0.1:<0.19871.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:10] [ns_1@127.0.0.1:<0.19882.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:11] [ns_1@127.0.0.1:<0.19876.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:11] [ns_1@127.0.0.1:<0.19887.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:11] [ns_1@127.0.0.1:<0.19882.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:12] 
[ns_1@127.0.0.1:<0.19660.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:12] [ns_1@127.0.0.1:<0.19892.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:12] [ns_1@127.0.0.1:<0.19882.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:13] [ns_1@127.0.0.1:<0.19894.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:13] [ns_1@127.0.0.1:<0.19900.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:06:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19882.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:06:13] [ns_1@127.0.0.1:<0.19912.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:14] [ns_1@127.0.0.1:<0.19677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:14] [ns_1@127.0.0.1:<0.19905.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:14] [ns_1@127.0.0.1:<0.19912.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19886.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19924.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:15] [ns_1@127.0.0.1:<0.19907.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:15] 
[ns_1@127.0.0.1:<0.19915.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:15] [ns_1@127.0.0.1:<0.19912.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:16] [ns_1@127.0.0.1:<0.19688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:16] [ns_1@127.0.0.1:<0.19912.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:17] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:06:17] [ns_1@127.0.0.1:<0.19938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19912.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:06:18] [ns_1@127.0.0.1:<0.19938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.19938.0>} [ns_server:info] [2012-03-26 1:06:19] [ns_1@127.0.0.1:<0.19938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19924.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19949.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:06:20] [ns_1@127.0.0.1:<0.19938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:21] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run 
exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:06:21] [ns_1@127.0.0.1:<0.19955.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19938.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:06:22] [ns_1@127.0.0.1:<0.19955.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:23] [ns_1@127.0.0.1:<0.19955.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:24] [ns_1@127.0.0.1:<0.19955.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19949.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19968.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:06:25] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:06:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19955.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 
1978 neighbours: [stats:error] [2012-03-26 1:06:26] [ns_1@127.0.0.1:<0.19705.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:26] [ns_1@127.0.0.1:<0.19925.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:27] [ns_1@127.0.0.1:<0.19927.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:27] [ns_1@127.0.0.1:<0.19932.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:28] [ns_1@127.0.0.1:<0.19718.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:28] [ns_1@127.0.0.1:<0.19977.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:29] [ns_1@127.0.0.1:<0.19979.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:29] [ns_1@127.0.0.1:<0.19972.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:29] [ns_1@127.0.0.1:<0.19995.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.19968.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20000.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:30] [ns_1@127.0.0.1:<0.19737.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:30] [ns_1@127.0.0.1:<0.19987.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:30] [ns_1@127.0.0.1:<0.19995.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:31] [ns_1@127.0.0.1:<0.19991.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:31] [ns_1@127.0.0.1:<0.19983.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:31] [ns_1@127.0.0.1:<0.19995.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:32] [ns_1@127.0.0.1:<0.19751.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:32] [ns_1@127.0.0.1:<0.20006.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:32] [ns_1@127.0.0.1:<0.19995.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:06:33] [ns_1@127.0.0.1:<0.20008.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:06:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19995.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:06:33] [ns_1@127.0.0.1:<0.20024.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:33] [ns_1@127.0.0.1:<0.20001.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:34] [ns_1@127.0.0.1:<0.19767.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:34] [ns_1@127.0.0.1:<0.20024.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:34] [ns_1@127.0.0.1:<0.20019.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20000.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20037.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:35] [ns_1@127.0.0.1:<0.20027.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:35] [ns_1@127.0.0.1:<0.20024.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:35] [ns_1@127.0.0.1:<0.20013.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:36] [ns_1@127.0.0.1:<0.19813.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:36] 
[ns_1@127.0.0.1:<0.20024.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:36] [ns_1@127.0.0.1:<0.20038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:37] [ns_1@127.0.0.1:<0.20043.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:37] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:06:37] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20024.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:06:37] [ns_1@127.0.0.1:<0.20056.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:37] [ns_1@127.0.0.1:<0.20029.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:38] [ns_1@127.0.0.1:<0.19828.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:38] [ns_1@127.0.0.1:<0.20056.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:38] [ns_1@127.0.0.1:<0.20051.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:39] [ns_1@127.0.0.1:<0.20059.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.20056.0>} [ns_server:info] [2012-03-26 1:06:39] [ns_1@127.0.0.1:<0.20056.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20037.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.20076.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:39] [ns_1@127.0.0.1:<0.20046.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:40] [ns_1@127.0.0.1:<0.19843.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:40] [ns_1@127.0.0.1:<0.20056.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:40] [ns_1@127.0.0.1:<0.20070.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:41] [ns_1@127.0.0.1:<0.20077.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:41] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:06:41] [ns_1@127.0.0.1:<0.20089.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20056.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:06:41] [ns_1@127.0.0.1:<0.20061.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:42] [ns_1@127.0.0.1:<0.19858.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:42] [ns_1@127.0.0.1:<0.20089.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:42] [ns_1@127.0.0.1:<0.20084.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:43] [ns_1@127.0.0.1:<0.20093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:43] [ns_1@127.0.0.1:<0.20089.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:43] [ns_1@127.0.0.1:<0.20079.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:44] [ns_1@127.0.0.1:<0.19868.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:44] [ns_1@127.0.0.1:<0.20089.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:44] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20076.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20114.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:44] [ns_1@127.0.0.1:<0.20100.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:45] [ns_1@127.0.0.1:<0.20105.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:45] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:06:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20089.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:06:45] [ns_1@127.0.0.1:<0.20095.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:46] [ns_1@127.0.0.1:<0.19889.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:46] [ns_1@127.0.0.1:<0.20117.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:47] [ns_1@127.0.0.1:<0.20122.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:47] [ns_1@127.0.0.1:<0.20109.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:48] [ns_1@127.0.0.1:<0.19902.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:49] [ns_1@127.0.0.1:<0.20128.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:49] [ns_1@127.0.0.1:<0.20133.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:49] [ns_1@127.0.0.1:<0.20145.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] 
[2012-03-26 1:06:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20114.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20149.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:50] [ns_1@127.0.0.1:<0.20124.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:50] [ns_1@127.0.0.1:<0.19919.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:50] [ns_1@127.0.0.1:<0.20145.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:51] [ns_1@127.0.0.1:<0.20141.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:51] [ns_1@127.0.0.1:<0.20150.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:51] [ns_1@127.0.0.1:<0.20145.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:52] [ns_1@127.0.0.1:<0.20135.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:52] [ns_1@127.0.0.1:<0.19974.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:52] [ns_1@127.0.0.1:<0.20145.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:53] [ns_1@127.0.0.1:<0.20158.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:53] [ns_1@127.0.0.1:<0.20163.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:06:53] [ns_1@127.0.0.1:<0.20175.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20145.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:06:54] [ns_1@127.0.0.1:<0.20152.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:54] [ns_1@127.0.0.1:<0.19985.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:54] [ns_1@127.0.0.1:<0.20175.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:06:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20149.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20188.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:06:55] [ns_1@127.0.0.1:<0.20170.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:55] [ns_1@127.0.0.1:<0.20179.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:55] [ns_1@127.0.0.1:<0.20175.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:56] [ns_1@127.0.0.1:<0.20165.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:56] [ns_1@127.0.0.1:<0.20003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:56] [ns_1@127.0.0.1:<0.20175.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:57] [ns_1@127.0.0.1:<0.20191.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:57] [ns_1@127.0.0.1:<0.20196.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:06:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20175.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call 
from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:06:57] [ns_1@127.0.0.1:<0.20209.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:58] [ns_1@127.0.0.1:<0.20184.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:58] [ns_1@127.0.0.1:<0.20015.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:58] [ns_1@127.0.0.1:<0.20209.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:59] [ns_1@127.0.0.1:<0.20204.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:06:59] [ns_1@127.0.0.1:<0.20212.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:06:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.20209.0>} [ns_server:info] [2012-03-26 1:06:59] [ns_1@127.0.0.1:<0.20209.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:06:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749219,381767}}, {outgoing_replications_safeness_level, [{"set-get-bucket-replica-3",unknown}, {"set-get-bucket-replica-2",unknown}, {"set-get-bucket-replica-1",unknown}, {"default",unknown}]}, {incoming_replications_conf_hashes, [{"set-get-bucket-replica-3",[]}, {"set-get-bucket-replica-2",[]}, {"set-get-bucket-replica-1",[]}, {"default",[]}]}, {replication, [{"set-get-bucket-replica-3",1.0}, {"set-get-bucket-replica-2",1.0}, {"set-get-bucket-replica-1",1.0}, {"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37232536}, {processes,10583840}, {processes_used,8959232}, {system,26648696}, {atom,1306681}, {atom_used,1284164}, {binary,352432}, {code,12859877}, {ets,1706896}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,241}, {memory_data,{4040077312,4008624128,{<0.18805.0>,601392}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30724 kB\nBuffers: 54224 kB\nCached: 3531328 kB\nSwapCached: 0 kB\nActive: 303852 kB\nInactive: 3442572 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30724 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 
160872 kB\nMapped: 24856 kB\nSlab: 134504 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 576948 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616079872}, {buffered_memory,55525376}, {free_memory,31461376}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{236115,0}}, {context_switches,{454763,0}}, {garbage_collection,{217817,214928161,0}}, {io,{{input,16651012},{output,13400607}}}, {reductions,{109294214,702974}}, {run_queue,0}, {runtime,{16160,170}}]}]}] [error_logger:error] [2012-03-26 1:06:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20188.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:06:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20229.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:00] [ns_1@127.0.0.1:<0.20198.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:00] [ns_1@127.0.0.1:<0.20034.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:00] [ns_1@127.0.0.1:<0.20209.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:01] [ns_1@127.0.0.1:<0.20221.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:01] [ns_1@127.0.0.1:<0.20230.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20209.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:07:01] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, 
[{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:07:01] [ns_1@127.0.0.1:<0.20242.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:02] [ns_1@127.0.0.1:<0.20214.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:02] [ns_1@127.0.0.1:<0.20049.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:02] [ns_1@127.0.0.1:<0.20242.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:03] [ns_1@127.0.0.1:<0.20237.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:03] [ns_1@127.0.0.1:<0.20245.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:03] [ns_1@127.0.0.1:<0.20242.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:04] [ns_1@127.0.0.1:<0.20232.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:04] [ns_1@127.0.0.1:<0.20065.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:04] [ns_1@127.0.0.1:<0.20242.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20229.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20266.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:05] [ns_1@127.0.0.1:<0.20253.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:05] [ns_1@127.0.0.1:<0.20258.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:07:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20242.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:07:06] [ns_1@127.0.0.1:<0.20247.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:06] [ns_1@127.0.0.1:<0.20082.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:07] [ns_1@127.0.0.1:<0.20269.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:07] [ns_1@127.0.0.1:<0.20275.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:08] [ns_1@127.0.0.1:<0.20262.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:08] [ns_1@127.0.0.1:<0.20098.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:09] [ns_1@127.0.0.1:<0.20283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:09] [ns_1@127.0.0.1:<0.20299.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:09] [ns_1@127.0.0.1:<0.20285.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20266.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20305.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:10] [ns_1@127.0.0.1:<0.20277.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:10] [ns_1@127.0.0.1:<0.20299.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:10] [ns_1@127.0.0.1:<0.20115.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:11] [ns_1@127.0.0.1:<0.20302.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:11] [ns_1@127.0.0.1:<0.20299.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:11] [ns_1@127.0.0.1:<0.20306.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:12] [ns_1@127.0.0.1:<0.20287.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:12] 
[ns_1@127.0.0.1:<0.20299.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:12] [ns_1@127.0.0.1:<0.20126.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:13] [ns_1@127.0.0.1:<0.20317.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:07:13] [ns_1@127.0.0.1:<0.20329.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20299.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:07:13] [ns_1@127.0.0.1:<0.20319.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:14] [ns_1@127.0.0.1:<0.20309.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:14] [ns_1@127.0.0.1:<0.20329.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20305.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20341.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:14] [ns_1@127.0.0.1:<0.20137.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:15] [ns_1@127.0.0.1:<0.20332.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:15] [ns_1@127.0.0.1:<0.20329.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:15] 
[ns_1@127.0.0.1:<0.20334.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:16] [ns_1@127.0.0.1:<0.20322.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:16] [ns_1@127.0.0.1:<0.20329.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:16] [ns_1@127.0.0.1:<0.20156.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:17] [ns_1@127.0.0.1:<0.20349.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:17] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:07:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20329.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:07:17] [ns_1@127.0.0.1:<0.20361.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:17] [ns_1@127.0.0.1:<0.20351.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:18] [ns_1@127.0.0.1:<0.20342.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:18] [ns_1@127.0.0.1:<0.20361.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:18] [ns_1@127.0.0.1:<0.20168.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:19] [ns_1@127.0.0.1:<0.20365.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.20361.0>} [ns_server:info] [2012-03-26 1:07:19] [ns_1@127.0.0.1:<0.20361.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20341.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20380.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:19] [ns_1@127.0.0.1:<0.20367.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:20] [ns_1@127.0.0.1:<0.20354.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:20] [ns_1@127.0.0.1:<0.20361.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:20] [ns_1@127.0.0.1:<0.20189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:21] [ns_1@127.0.0.1:<0.20381.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:21] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:07:21] [ns_1@127.0.0.1:<0.20394.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20361.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:07:21] [ns_1@127.0.0.1:<0.20383.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:22] [ns_1@127.0.0.1:<0.20370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:22] [ns_1@127.0.0.1:<0.20394.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:23] [ns_1@127.0.0.1:<0.20202.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:23] [ns_1@127.0.0.1:<0.20397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:23] [ns_1@127.0.0.1:<0.20394.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:24] [ns_1@127.0.0.1:<0.20399.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:07:24] [ns_1@127.0.0.1:<0.20387.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:24] [ns_1@127.0.0.1:<0.20394.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20380.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20419.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:25] [ns_1@127.0.0.1:<0.20217.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:25] [ns_1@127.0.0.1:<0.20410.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:25] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:07:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20394.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:07:26] [ns_1@127.0.0.1:<0.20415.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:26] [ns_1@127.0.0.1:<0.20402.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:26] [ns_1@127.0.0.1:<0.20420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:26] [ns_1@127.0.0.1:<0.20420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:26] [ns_1@127.0.0.1:<0.20420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:26] [ns_1@127.0.0.1:<0.20420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:26] [ns_1@127.0.0.1:<0.20235.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:07:27] [ns_1@127.0.0.1:<0.20251.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:27] [ns_1@127.0.0.1:<0.20427.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:28] [ns_1@127.0.0.1:<0.20429.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:28] [ns_1@127.0.0.1:<0.20439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:29] [ns_1@127.0.0.1:<0.20267.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:29] [ns_1@127.0.0.1:<0.20445.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:29] [ns_1@127.0.0.1:<0.20457.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20419.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20462.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:07:30] [ns_1@127.0.0.1:<0.20457.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:31] [ns_1@127.0.0.1:<0.20457.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:32] [ns_1@127.0.0.1:<0.20457.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:30] [ns_1@127.0.0.1:<0.20431.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:07:33] [ns_1@127.0.0.1:<0.20474.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20457.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:07:34] [ns_1@127.0.0.1:<0.20463.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:34] [ns_1@127.0.0.1:<0.20436.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:34] [ns_1@127.0.0.1:<0.20474.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20462.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20485.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:35] [ns_1@127.0.0.1:<0.20449.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:35] [ns_1@127.0.0.1:<0.20422.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:35] [ns_1@127.0.0.1:<0.20474.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:36] [ns_1@127.0.0.1:<0.20481.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:36] [ns_1@127.0.0.1:<0.20447.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:36] [ns_1@127.0.0.1:<0.20474.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:37] [ns_1@127.0.0.1:<0.20279.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:37] [ns_1@127.0.0.1:<0.20441.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:37] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:07:37] [ns_1@127.0.0.1:<0.20506.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:37] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20474.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 
'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:07:38] [ns_1@127.0.0.1:<0.20496.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:38] [ns_1@127.0.0.1:<0.20486.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:38] [ns_1@127.0.0.1:<0.20506.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:39] [ns_1@127.0.0.1:<0.20290.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:39] [ns_1@127.0.0.1:<0.20453.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.20506.0>} [ns_server:info] [2012-03-26 1:07:39] [ns_1@127.0.0.1:<0.20506.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20485.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20526.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:40] [ns_1@127.0.0.1:<0.20511.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:40] [ns_1@127.0.0.1:<0.20499.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:40] [ns_1@127.0.0.1:<0.20506.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:41] [ns_1@127.0.0.1:<0.20311.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:41] [ns_1@127.0.0.1:<0.20477.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:41] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 
1:07:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20506.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:07:41] [ns_1@127.0.0.1:<0.20539.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:42] [ns_1@127.0.0.1:<0.20529.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:42] [ns_1@127.0.0.1:<0.20515.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:42] [ns_1@127.0.0.1:<0.20539.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:43] [ns_1@127.0.0.1:<0.20324.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:43] [ns_1@127.0.0.1:<0.20494.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:43] [ns_1@127.0.0.1:<0.20539.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:44] [ns_1@127.0.0.1:<0.20545.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:44] [ns_1@127.0.0.1:<0.20532.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:44] [ns_1@127.0.0.1:<0.20539.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20526.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20564.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:45] [ns_1@127.0.0.1:<0.20344.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:45] [ns_1@127.0.0.1:<0.20509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:45] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket 
"default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:07:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20539.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:07:46] [ns_1@127.0.0.1:<0.20559.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:46] [ns_1@127.0.0.1:<0.20548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:47] [ns_1@127.0.0.1:<0.20356.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:47] [ns_1@127.0.0.1:<0.20527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:48] [ns_1@127.0.0.1:<0.20574.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:48] [ns_1@127.0.0.1:<0.20565.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:49] [ns_1@127.0.0.1:<0.20374.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:49] [ns_1@127.0.0.1:<0.20543.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:49] [ns_1@127.0.0.1:<0.20595.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20564.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20599.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:50] [ns_1@127.0.0.1:<0.20585.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:50] [ns_1@127.0.0.1:<0.20576.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:50] [ns_1@127.0.0.1:<0.20595.0>:ns_janitor:wait_for_memcached:278] Waiting for 
"set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:51] [ns_1@127.0.0.1:<0.20389.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:51] [ns_1@127.0.0.1:<0.20555.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:51] [ns_1@127.0.0.1:<0.20595.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:52] [ns_1@127.0.0.1:<0.20602.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:52] [ns_1@127.0.0.1:<0.20595.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:52] [ns_1@127.0.0.1:<0.20587.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:53] [ns_1@127.0.0.1:<0.20404.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:07:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20595.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:07:53] [ns_1@127.0.0.1:<0.20623.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:53] [ns_1@127.0.0.1:<0.20572.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:54] [ns_1@127.0.0.1:<0.20616.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:54] [ns_1@127.0.0.1:<0.20623.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:54] [ns_1@127.0.0.1:<0.20606.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20599.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:07:54] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20638.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:55] [ns_1@127.0.0.1:<0.20488.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:55] [ns_1@127.0.0.1:<0.20623.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:55] [ns_1@127.0.0.1:<0.20583.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:56] [ns_1@127.0.0.1:<0.20635.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:56] [ns_1@127.0.0.1:<0.20623.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:56] [ns_1@127.0.0.1:<0.20618.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:57] [ns_1@127.0.0.1:<0.20501.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:07:57] [ns_1@127.0.0.1:<0.20657.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:07:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20623.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:07:57] [ns_1@127.0.0.1:<0.20600.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:58] [ns_1@127.0.0.1:<0.20650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:58] [ns_1@127.0.0.1:<0.20657.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:58] [ns_1@127.0.0.1:<0.20639.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:07:59] [ns_1@127.0.0.1:<0.20520.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:07:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state 
janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.20657.0>} [ns_server:info] [2012-03-26 1:07:59] [ns_1@127.0.0.1:<0.20657.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:07:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749279,426595}}, {outgoing_replications_safeness_level, [{"set-get-bucket-replica-3",unknown}, {"set-get-bucket-replica-2",unknown}, {"set-get-bucket-replica-1",unknown}, {"default",unknown}]}, {incoming_replications_conf_hashes, [{"set-get-bucket-replica-3",[]}, {"set-get-bucket-replica-2",[]}, {"set-get-bucket-replica-1",[]}, {"default",[]}]}, {replication, [{"set-get-bucket-replica-3",1.0}, {"set-get-bucket-replica-2",1.0}, {"set-get-bucket-replica-1",1.0}, {"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37009048}, {processes,10316984}, {processes_used,8692376}, {system,26692064}, {atom,1306681}, {atom_used,1284164}, {binary,354176}, {code,12859877}, {ets,1735448}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,301}, {memory_data,{4040077312,4009132032,{<0.18805.0>,601392}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30476 kB\nBuffers: 54344 kB\nCached: 3531516 kB\nSwapCached: 0 kB\nActive: 303968 kB\nInactive: 3442784 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30476 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 12 kB\nWriteback: 0 kB\nAnonPages: 160892 kB\nMapped: 24856 kB\nSlab: 134472 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 576948 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616272384}, {buffered_memory,55648256}, {free_memory,31207424}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{296160,0}}, {context_switches,{468440,0}}, {garbage_collection,{225303,227709496,0}}, {io,{{input,16682402},{output,13948996}}}, {reductions,{112094424,723932}}, {run_queue,0}, {runtime,{16820,150}}]}]}] [error_logger:error] [2012-03-26 1:07:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20638.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
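The crash and supervisor reports in this stretch of the log all carry the same exit term, {noproc,{gen_server,call,[Target,Request,30000]}}: gen_server:call/3 exits its caller with that shape when nothing is registered under the target name, here the per-bucket 'ns_memcached-<bucket>' workers, so ns_janitor:cleanup/2 and the hot_keys_keeper child of menelaus_sup keep dying and being restarted until the bucket processes come back. The sketch below is illustrative only, not the ns_server code; the module name and bucket name are invented.

%% noproc_demo.erl: hypothetical sketch of the {noproc,...} exits seen above.
-module(noproc_demo).
-export([run/0]).

run() ->
    %% Nothing is registered under this made-up name, so the call exits
    %% with {noproc,{gen_server,call,[Name,Request,Timeout]}}, the same
    %% reason shape copied verbatim into the crash and supervisor reports.
    try
        gen_server:call('ns_memcached-some-bucket', list_vbuckets_prevstate, 30000)
    catch
        exit:{noproc, {gen_server, call, _Args}} ->
            noproc_as_in_the_reports
    end.

When run in a shell where no such process exists, run/0 returns noproc_as_in_the_reports because the exit is caught; ns_janitor and hot_keys_keeper do not catch it, which is why the crash reports and menelaus_sup restarts above repeat every few seconds.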
[error_logger:info] [2012-03-26 1:07:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20677.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:07:59] [ns_1@127.0.0.1:<0.20613.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:00] [ns_1@127.0.0.1:<0.20665.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:00] [ns_1@127.0.0.1:<0.20657.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:00] [ns_1@127.0.0.1:<0.20652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:01] [ns_1@127.0.0.1:<0.20534.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20657.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:08:01] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:08:01] [ns_1@127.0.0.1:<0.20690.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:01] [ns_1@127.0.0.1:<0.20629.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:02] [ns_1@127.0.0.1:<0.20683.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:02] [ns_1@127.0.0.1:<0.20690.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:02] [ns_1@127.0.0.1:<0.20667.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:03] [ns_1@127.0.0.1:<0.20550.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:03] [ns_1@127.0.0.1:<0.20690.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:03] [ns_1@127.0.0.1:<0.20646.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:04] [ns_1@127.0.0.1:<0.20699.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:04] [ns_1@127.0.0.1:<0.20690.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20677.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20714.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:04] [ns_1@127.0.0.1:<0.20685.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:05] [ns_1@127.0.0.1:<0.20567.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:08:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20690.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:08:05] [ns_1@127.0.0.1:<0.20662.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:06] [ns_1@127.0.0.1:<0.20715.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:06] [ns_1@127.0.0.1:<0.20701.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:07] [ns_1@127.0.0.1:<0.20578.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:08] [ns_1@127.0.0.1:<0.20680.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:08] [ns_1@127.0.0.1:<0.20727.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:09] [ns_1@127.0.0.1:<0.20717.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:09] [ns_1@127.0.0.1:<0.20591.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:09] [ns_1@127.0.0.1:<0.20749.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20714.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20753.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:10] [ns_1@127.0.0.1:<0.20695.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:10] [ns_1@127.0.0.1:<0.20738.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:10] [ns_1@127.0.0.1:<0.20749.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:11] [ns_1@127.0.0.1:<0.20729.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:11] [ns_1@127.0.0.1:<0.20610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:11] [ns_1@127.0.0.1:<0.20749.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:12] [ns_1@127.0.0.1:<0.20710.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:12] [ns_1@127.0.0.1:<0.20759.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:12] [ns_1@127.0.0.1:<0.20749.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:13] [ns_1@127.0.0.1:<0.20743.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:13] [ns_1@127.0.0.1:<0.20626.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:08:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20749.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:08:13] [ns_1@127.0.0.1:<0.20779.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:14] [ns_1@127.0.0.1:<0.20725.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:14] [ns_1@127.0.0.1:<0.20772.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:14] [ns_1@127.0.0.1:<0.20779.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20753.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20791.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:15] [ns_1@127.0.0.1:<0.20761.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:15] [ns_1@127.0.0.1:<0.20644.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:15] [ns_1@127.0.0.1:<0.20779.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:16] [ns_1@127.0.0.1:<0.20735.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:16] [ns_1@127.0.0.1:<0.20792.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:16] [ns_1@127.0.0.1:<0.20779.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:17] [ns_1@127.0.0.1:<0.20774.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:17] [ns_1@127.0.0.1:<0.20660.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:17] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:08:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20779.0> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:08:17] [ns_1@127.0.0.1:<0.20811.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:18] [ns_1@127.0.0.1:<0.20756.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:18] [ns_1@127.0.0.1:<0.20804.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:18] [ns_1@127.0.0.1:<0.20811.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:19] [ns_1@127.0.0.1:<0.20794.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:19] [ns_1@127.0.0.1:<0.20678.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.20811.0>} [ns_server:info] [2012-03-26 1:08:19] [ns_1@127.0.0.1:<0.20811.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20791.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20830.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:20] [ns_1@127.0.0.1:<0.20769.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:20] [ns_1@127.0.0.1:<0.20820.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:20] [ns_1@127.0.0.1:<0.20811.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:21] [ns_1@127.0.0.1:<0.20806.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:21] [ns_1@127.0.0.1:<0.20693.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:21] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited 
for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:08:21] [ns_1@127.0.0.1:<0.20844.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20811.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:08:22] [ns_1@127.0.0.1:<0.20786.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:22] [ns_1@127.0.0.1:<0.20837.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:22] [ns_1@127.0.0.1:<0.20844.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:23] [ns_1@127.0.0.1:<0.20824.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:23] [ns_1@127.0.0.1:<0.20706.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:23] [ns_1@127.0.0.1:<0.20844.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:24] [ns_1@127.0.0.1:<0.20801.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:24] [ns_1@127.0.0.1:<0.20852.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:24] [ns_1@127.0.0.1:<0.20844.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20830.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20869.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:25] [ns_1@127.0.0.1:<0.20839.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:25] 
[ns_1@127.0.0.1:<0.20723.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:25] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:08:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20844.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20817.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20870.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20854.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:26] [ns_1@127.0.0.1:<0.20733.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:27] [ns_1@127.0.0.1:<0.20754.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:27] [ns_1@127.0.0.1:<0.20767.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:28] [ns_1@127.0.0.1:<0.20833.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:28] [ns_1@127.0.0.1:<0.20891.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:29] [ns_1@127.0.0.1:<0.20895.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:29] [ns_1@127.0.0.1:<0.20907.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:29] [ns_1@127.0.0.1:<0.20782.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20869.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20914.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:30] [ns_1@127.0.0.1:<0.20849.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:30] [ns_1@127.0.0.1:<0.20907.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:30] [ns_1@127.0.0.1:<0.20901.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:31] [ns_1@127.0.0.1:<0.20911.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:31] [ns_1@127.0.0.1:<0.20907.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:31] [ns_1@127.0.0.1:<0.20799.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:32] [ns_1@127.0.0.1:<0.20865.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:32] [ns_1@127.0.0.1:<0.20907.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:32] [ns_1@127.0.0.1:<0.20920.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:33] [ns_1@127.0.0.1:<0.20925.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20907.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:08:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:08:33] [ns_1@127.0.0.1:<0.20938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:08:33] [ns_1@127.0.0.1:<0.20815.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:34] [ns_1@127.0.0.1:<0.20879.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:34] [ns_1@127.0.0.1:<0.20938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20914.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20949.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:34] [ns_1@127.0.0.1:<0.20933.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:35] [ns_1@127.0.0.1:<0.20941.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:35] [ns_1@127.0.0.1:<0.20938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:35] [ns_1@127.0.0.1:<0.20831.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:36] [ns_1@127.0.0.1:<0.20881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:36] [ns_1@127.0.0.1:<0.20938.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:36] [ns_1@127.0.0.1:<0.20952.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:37] [ns_1@127.0.0.1:<0.20958.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:37] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:08:37] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20938.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false 
status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:08:37] [ns_1@127.0.0.1:<0.20970.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:37] [ns_1@127.0.0.1:<0.20847.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:38] [ns_1@127.0.0.1:<0.20883.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:38] [ns_1@127.0.0.1:<0.20970.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:38] [ns_1@127.0.0.1:<0.20965.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:39] [ns_1@127.0.0.1:<0.20973.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.20970.0>} [ns_server:info] [2012-03-26 1:08:39] [ns_1@127.0.0.1:<0.20970.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20949.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20990.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:39] [ns_1@127.0.0.1:<0.20860.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:40] [ns_1@127.0.0.1:<0.20888.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:40] [ns_1@127.0.0.1:<0.20970.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:41] [ns_1@127.0.0.1:<0.20984.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:41] [ns_1@127.0.0.1:<0.20991.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:41] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:08:41] [ns_1@127.0.0.1:<0.21003.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20970.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:08:42] [ns_1@127.0.0.1:<0.20877.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:42] [ns_1@127.0.0.1:<0.20899.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:42] [ns_1@127.0.0.1:<0.21003.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:43] [ns_1@127.0.0.1:<0.20998.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:43] [ns_1@127.0.0.1:<0.21007.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:43] [ns_1@127.0.0.1:<0.21003.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:44] [ns_1@127.0.0.1:<0.20897.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:44] [ns_1@127.0.0.1:<0.20918.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:44] [ns_1@127.0.0.1:<0.21003.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.20990.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21028.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:45] [ns_1@127.0.0.1:<0.21014.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:45] [ns_1@127.0.0.1:<0.21019.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:45] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:08:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21003.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:08:46] [ns_1@127.0.0.1:<0.20915.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:46] [ns_1@127.0.0.1:<0.20931.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:47] [ns_1@127.0.0.1:<0.21031.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:47] [ns_1@127.0.0.1:<0.21036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:48] [ns_1@127.0.0.1:<0.20927.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:48] [ns_1@127.0.0.1:<0.20950.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:49] [ns_1@127.0.0.1:<0.21042.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:49] [ns_1@127.0.0.1:<0.21047.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:49] [ns_1@127.0.0.1:<0.21059.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21028.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21063.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:50] [ns_1@127.0.0.1:<0.20943.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:50] [ns_1@127.0.0.1:<0.20963.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:50] [ns_1@127.0.0.1:<0.21059.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:51] [ns_1@127.0.0.1:<0.21055.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:51] 
[ns_1@127.0.0.1:<0.21064.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:51] [ns_1@127.0.0.1:<0.21059.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:52] [ns_1@127.0.0.1:<0.20960.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:52] [ns_1@127.0.0.1:<0.20979.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:52] [ns_1@127.0.0.1:<0.21059.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:53] [ns_1@127.0.0.1:<0.21072.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:53] [ns_1@127.0.0.1:<0.21077.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21059.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:08:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:08:53] [ns_1@127.0.0.1:<0.21089.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:54] [ns_1@127.0.0.1:<0.20975.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:54] [ns_1@127.0.0.1:<0.20996.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:54] [ns_1@127.0.0.1:<0.21089.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21063.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21102.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:08:55] [ns_1@127.0.0.1:<0.21084.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:55] [ns_1@127.0.0.1:<0.21093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:55] [ns_1@127.0.0.1:<0.21089.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:56] [ns_1@127.0.0.1:<0.20993.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:56] [ns_1@127.0.0.1:<0.21012.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:56] [ns_1@127.0.0.1:<0.21089.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:57] [ns_1@127.0.0.1:<0.21105.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:57] [ns_1@127.0.0.1:<0.21110.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [ns_server:info] [2012-03-26 1:08:57] [ns_1@127.0.0.1:<0.21123.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:08:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21089.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [stats:error] [2012-03-26 1:08:58] [ns_1@127.0.0.1:<0.21009.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:58] [ns_1@127.0.0.1:<0.21029.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:58] [ns_1@127.0.0.1:<0.21123.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:08:59] [ns_1@127.0.0.1:<0.21118.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.21123.0>} [stats:error] [2012-03-26 1:08:59] [ns_1@127.0.0.1:<0.21126.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:08:59] [ns_1@127.0.0.1:<0.21123.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:08:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749339,469605}}, {outgoing_replications_safeness_level, [{"set-get-bucket-replica-3",unknown}, {"set-get-bucket-replica-2",unknown}, {"set-get-bucket-replica-1",unknown}, {"default",unknown}]}, {incoming_replications_conf_hashes, [{"set-get-bucket-replica-3",[]}, {"set-get-bucket-replica-2",[]}, {"set-get-bucket-replica-1",[]}, {"default",[]}]}, {replication, [{"set-get-bucket-replica-3",1.0}, {"set-get-bucket-replica-2",1.0}, {"set-get-bucket-replica-1",1.0}, {"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37304328}, {processes,10572928}, {processes_used,8948320}, {system,26731400}, {atom,1306681}, {atom_used,1284164}, {binary,349648}, {code,12859877}, {ets,1764048}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,361}, {memory_data,{4040077312,4009250816,{<0.18805.0>,601392}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30104 kB\nBuffers: 54464 kB\nCached: 3531704 kB\nSwapCached: 0 kB\nActive: 304156 kB\nInactive: 3442944 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30104 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 12 kB\nWriteback: 0 kB\nAnonPages: 160932 kB\nMapped: 24856 kB\nSlab: 134464 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 576948 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616464896}, {buffered_memory,55771136}, {free_memory,30826496}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{356204,1}}, {context_switches,{482415,0}}, {garbage_collection,{232912,240849310,0}}, {io,{{input,16713828},{output,14509847}}}, {reductions,{114971630,694812}}, {run_queue,0}, {runtime,{17500,170}}]}]}] [error_logger:error] [2012-03-26 1:08:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21102.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:08:59] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21156.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:00] [ns_1@127.0.0.1:<0.21023.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:00] [ns_1@127.0.0.1:<0.21123.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:00] [ns_1@127.0.0.1:<0.21040.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:01] [ns_1@127.0.0.1:<0.21151.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:01] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:09:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21123.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:09:01] [ns_1@127.0.0.1:<0.21167.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:01] [ns_1@127.0.0.1:<0.21157.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:02] [ns_1@127.0.0.1:<0.21038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:02] [ns_1@127.0.0.1:<0.21167.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:02] [ns_1@127.0.0.1:<0.21051.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:03] [ns_1@127.0.0.1:<0.21170.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:03] [ns_1@127.0.0.1:<0.21167.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:03] [ns_1@127.0.0.1:<0.21172.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:04] [ns_1@127.0.0.1:<0.21049.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:04] [ns_1@127.0.0.1:<0.21167.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 
1:09:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21156.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21192.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:04] [ns_1@127.0.0.1:<0.21070.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:05] [ns_1@127.0.0.1:<0.21183.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:09:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21167.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:09:05] [ns_1@127.0.0.1:<0.21185.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:06] [ns_1@127.0.0.1:<0.21066.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:06] [ns_1@127.0.0.1:<0.21082.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:07] [ns_1@127.0.0.1:<0.21200.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:07] [ns_1@127.0.0.1:<0.21202.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:08] [ns_1@127.0.0.1:<0.21079.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:08] [ns_1@127.0.0.1:<0.21103.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:09] [ns_1@127.0.0.1:<0.21210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:09] [ns_1@127.0.0.1:<0.21226.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [error_logger:error] 
[2012-03-26 1:09:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21192.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21230.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:09] [ns_1@127.0.0.1:<0.21212.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:10] [ns_1@127.0.0.1:<0.21098.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:10] [ns_1@127.0.0.1:<0.21226.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:10] [ns_1@127.0.0.1:<0.21116.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:11] [ns_1@127.0.0.1:<0.21231.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:11] [ns_1@127.0.0.1:<0.21226.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:11] [ns_1@127.0.0.1:<0.21233.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:12] [ns_1@127.0.0.1:<0.21112.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:12] [ns_1@127.0.0.1:<0.21226.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:12] [ns_1@127.0.0.1:<0.21131.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:13] [ns_1@127.0.0.1:<0.21244.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:09:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21226.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-3', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false 
status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:09:13] [ns_1@127.0.0.1:<0.21256.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:13] [ns_1@127.0.0.1:<0.21246.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:14] [ns_1@127.0.0.1:<0.21128.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:14] [ns_1@127.0.0.1:<0.21256.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:09:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21230.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21268.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:15] [ns_1@127.0.0.1:<0.21162.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:15] [ns_1@127.0.0.1:<0.21259.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:15] [ns_1@127.0.0.1:<0.21256.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:16] [ns_1@127.0.0.1:<0.21263.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:16] [ns_1@127.0.0.1:<0.21160.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:16] [ns_1@127.0.0.1:<0.21256.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:17] [ns_1@127.0.0.1:<0.21178.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:17] [ns_1@127.0.0.1:<0.21276.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:17] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:09:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21256.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-2', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from 
ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:09:17] [ns_1@127.0.0.1:<0.21288.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:18] [ns_1@127.0.0.1:<0.21278.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:18] [ns_1@127.0.0.1:<0.21176.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:18] [ns_1@127.0.0.1:<0.21288.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:19] [ns_1@127.0.0.1:<0.21194.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:19] [ns_1@127.0.0.1:<0.21292.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1", "default"], <0.21288.0>} [ns_server:info] [2012-03-26 1:09:19] [ns_1@127.0.0.1:<0.21288.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:09:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21268.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21307.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:20] [ns_1@127.0.0.1:<0.21294.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:20] [ns_1@127.0.0.1:<0.21191.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:20] [ns_1@127.0.0.1:<0.21288.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-1" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:21] [ns_1@127.0.0.1:<0.21206.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:21] [ns_1@127.0.0.1:<0.21308.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:21] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {noproc, {gen_server, call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 
1:09:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21288.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-set-get-bucket-replica-1', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1986 neighbours: [ns_server:info] [2012-03-26 1:09:21] [ns_1@127.0.0.1:<0.21321.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:22] [ns_1@127.0.0.1:<0.21310.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:22] [ns_1@127.0.0.1:<0.21204.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:22] [ns_1@127.0.0.1:<0.21321.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:23] [ns_1@127.0.0.1:<0.21217.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:23] [ns_1@127.0.0.1:<0.21324.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:23] [ns_1@127.0.0.1:<0.21321.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:24] [ns_1@127.0.0.1:<0.21326.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:24] [ns_1@127.0.0.1:<0.21215.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:24] [ns_1@127.0.0.1:<0.21321.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:09:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21307.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21346.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:25] [ns_1@127.0.0.1:<0.21238.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:25] [ns_1@127.0.0.1:<0.21337.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:25] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket 
"default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:09:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21321.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1978 neighbours: [stats:error] [2012-03-26 1:09:26] [ns_1@127.0.0.1:<0.21342.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:26] [ns_1@127.0.0.1:<0.21236.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.21251.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.21271.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.21283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.21283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.21283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.21283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:27] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-3", [{num_replicas,3}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"set-get-bucket-replica-2", [{num_replicas,2}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"set-get-bucket-replica-1", [{num_replicas,1}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:09:27] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:09:27] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:09:27] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:09:27] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] 
moxi<0.18858.0>: 2012-03-26 01:09:29: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "set-get-bucket-replica-3", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "password", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/set-get-bucket-replica-3", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 3, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) moxi<0.18858.0>: 2012-03-26 01:09:29: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "set-get-bucket-replica-2", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "password", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/set-get-bucket-replica-2", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 2, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) moxi<0.18858.0>: 2012-03-26 01:09:29: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "set-get-bucket-replica-1", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "password", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/set-get-bucket-replica-1", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", [ns_server:warn] [2012-03-26 1:09:27] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:171] Dropped 18 log lines from moxi [error_logger:error] [2012-03-26 1:09:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21346.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 1:09:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21383.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.21301.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.21387.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-3" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.21356.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.21356.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.21356.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-3" with reason shutdown [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-3", [{num_replicas,3}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}, {"set-get-bucket-replica-2", [{num_replicas,2}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:09:32] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:09:34: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "set-get-bucket-replica-3", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "password", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/set-get-bucket-replica-3", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": 
"2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 3, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) moxi<0.18858.0>: 2012-03-26 01:09:34: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "set-get-bucket-replica-2", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "password", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/set-get-bucket-replica-2", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 2, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [error_logger:error] [2012-03-26 1:09:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21383.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21407.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "set-get-bucket-replica-1" within expected time. 
[ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.21249.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "set-get-bucket-replica-1" [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.21412.0>:ns_janitor:wait_for_memcached:278] Waiting for "set-get-bucket-replica-2" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.21316.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.21316.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-2" with reason shutdown [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"set-get-bucket-replica-3", [{num_replicas,3}, {replica_index,true}, {ram_quota,807403520}, {auth_type,sasl}, {sasl_password,"password"}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:09:37] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:09:39: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "set-get-bucket-replica-3", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "password", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/set-get-bucket-replica-3", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 3, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [error_logger:error] [2012-03-26 1:09:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-set-get-bucket-replica-3', topkeys,30000]}} Offender: [{pid,<0.21407.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21434.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-03-26 1:09:42] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "set-get-bucket-replica-2" within expected time. [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["set-get-bucket-replica-1"], <0.21439.0>} [menelaus:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:<0.21331.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "set-get-bucket-replica-2" [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "set-get-bucket-replica-1" with reason {badmatch, not_present} [error_logger:error] [2012-03-26 1:09:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21439.0> registered_name: [] exception error: no match of right hand side value not_present in function ns_janitor:cleanup/2 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [] trap_exit: false status: running heap_size: 10946 stack_size: 24 reductions: 848 neighbours: [stats:error] [2012-03-26 1:09:42] [ns_1@127.0.0.1:<0.21269.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:09:42] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:warn] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "set-get-bucket-replica-3" within expected time. 
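The {badmatch, not_present} crash just above is the standard Erlang failure when a pattern match insists on a success tuple but the lookup returns an atom instead; here a janitor run was still queued for "set-get-bucket-replica-1" after its config had been deleted. A sketch of that failure shape, with an assumed lookup function and data rather than the real ns_janitor/ns_bucket code:

    %% badmatch_sketch: reproduces the "no match of right hand side value
    %% not_present" error when a bucket config has already been removed.
    -module(badmatch_sketch).
    -export([cleanup/1]).

    %% Pretend bucket-config lookup: returns {ok, Props} or not_present.
    get_bucket(Bucket, Configs) ->
        case lists:keyfind(Bucket, 1, Configs) of
            {Bucket, Props} -> {ok, Props};
            false           -> not_present
        end.

    cleanup(Bucket) ->
        Configs = [],                                  % bucket already deleted
        %% Crashes with {badmatch, not_present} when the bucket is gone.
        {ok, _Props} = get_bucket(Bucket, Configs),
        ok.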
[ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.21281.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "set-get-bucket-replica-3" [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: memory_quota -> 3082 [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [menelaus:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.21329.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.21476.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.21347.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.21359.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:09:49: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: 
"clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 1:09:47] [ns_1@127.0.0.1:<0.21361.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:48] [ns_1@127.0.0.1:<0.21476.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:48] [ns_1@127.0.0.1:<0.21363.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:48] [ns_1@127.0.0.1:<0.21368.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:49] [ns_1@127.0.0.1:<0.21476.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:49] [ns_1@127.0.0.1:<0.21491.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.21476.0>} [error_logger:error] [2012-03-26 1:09:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21434.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21506.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:49] [ns_1@127.0.0.1:<0.21494.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:50] [ns_1@127.0.0.1:<0.21476.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:50] [ns_1@127.0.0.1:<0.21496.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:50] [ns_1@127.0.0.1:<0.21415.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:51] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:09:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21476.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:09:51] [ns_1@127.0.0.1:<0.21507.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:51] [ns_1@127.0.0.1:<0.21510.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:52] [ns_1@127.0.0.1:<0.21513.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:52] [ns_1@127.0.0.1:<0.21418.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:53] [ns_1@127.0.0.1:<0.21520.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:53] [ns_1@127.0.0.1:<0.21522.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:54] [ns_1@127.0.0.1:<0.21524.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:09:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21506.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:09:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21539.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:09:54] [ns_1@127.0.0.1:<0.21459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:55] [ns_1@127.0.0.1:<0.21531.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:55] [ns_1@127.0.0.1:<0.21533.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:56] [ns_1@127.0.0.1:<0.21540.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:56] [ns_1@127.0.0.1:<0.21460.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:57] [ns_1@127.0.0.1:<0.21546.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:58] [ns_1@127.0.0.1:<0.21548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:58] [ns_1@127.0.0.1:<0.21551.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:59] [ns_1@127.0.0.1:<0.21463.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:09:59] [ns_1@127.0.0.1:<0.21557.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:09:59] [ns_1@127.0.0.1:<0.21570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:09:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749399,504420}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37036384}, {processes,10184744}, {processes_used,8559264}, {system,26851640}, {atom,1306681}, {atom_used,1284164}, {binary,424200}, {code,12859877}, {ets,1795952}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,421}, {memory_data,{4040077312,4008615936,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30724 kB\nBuffers: 54688 kB\nCached: 3531508 kB\nSwapCached: 0 kB\nActive: 303444 kB\nInactive: 3442960 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30724 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 32 kB\nWriteback: 0 kB\nAnonPages: 160348 kB\nMapped: 24856 kB\nSlab: 134496 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577416 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616264192}, {buffered_memory,56000512}, {free_memory,31461376}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{416239,0}}, {context_switches,{496508,0}}, {garbage_collection,{240464,254184613,0}}, {io,{{input,17013731},{output,15238893}}}, {reductions,{117808895,671929}}, {run_queue,0}, {runtime,{18180,170}}]}]}] [error_logger:error] [2012-03-26 1:09:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21539.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
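Each SUPERVISOR REPORT / PROGRESS REPORT pair above is one restart cycle: hot_keys_keeper is declared permanent, so menelaus_sup restarts it every time its topkeys call exits with noproc, and only the pid changes. A sketch of the kind of child specification behind those reports; the field values mirror the reports themselves, while the supervisor module name and restart intensity are assumptions for illustration.

    %% menelaus_sup_sketch: a permanent worker child spec of the shape
    %% reported above (name, mfargs, restart_type, shutdown, child_type).
    -module(menelaus_sup_sketch).
    -behaviour(supervisor).
    -export([start_link/0, init/1]).

    start_link() ->
        supervisor:start_link({local, ?MODULE}, ?MODULE, []).

    init([]) ->
        HotKeysKeeper = {hot_keys_keeper,
                         {hot_keys_keeper, start_link, []},
                         permanent,      % restarted on every termination
                         5000,           % shutdown timeout in ms
                         worker,
                         [hot_keys_keeper]},
        {ok, {{one_for_one, 10, 10}, [HotKeysKeeper]}}.

With a permanent child, the supervisor only gives up once its restart intensity (here an assumed 10 restarts in 10 seconds) is exceeded; a termination roughly every 5 seconds, as in this log, stays under that limit indefinitely, which is why the restart loop never ends on its own.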
[error_logger:info] [2012-03-26 1:09:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21575.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:00] [ns_1@127.0.0.1:<0.21559.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:00] [ns_1@127.0.0.1:<0.21561.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:00] [ns_1@127.0.0.1:<0.21570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:01] [ns_1@127.0.0.1:<0.21466.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:01] [ns_1@127.0.0.1:<0.21576.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:01] [ns_1@127.0.0.1:<0.21570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:02] [ns_1@127.0.0.1:<0.21578.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:02] [ns_1@127.0.0.1:<0.21581.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:02] [ns_1@127.0.0.1:<0.21570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:03] [ns_1@127.0.0.1:<0.21467.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:03] [ns_1@127.0.0.1:<0.21588.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:10:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21570.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:10:04] [ns_1@127.0.0.1:<0.21590.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:04] [ns_1@127.0.0.1:<0.21594.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: 
{local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21575.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21608.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:05] [ns_1@127.0.0.1:<0.21468.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:05] [ns_1@127.0.0.1:<0.21601.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:06] [ns_1@127.0.0.1:<0.21605.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:06] [ns_1@127.0.0.1:<0.21609.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:07] [ns_1@127.0.0.1:<0.21469.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:07] [ns_1@127.0.0.1:<0.21616.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:08] [ns_1@127.0.0.1:<0.21618.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:08] [ns_1@127.0.0.1:<0.21620.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:09] [ns_1@127.0.0.1:<0.21470.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:09] [ns_1@127.0.0.1:<0.21626.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:09] [ns_1@127.0.0.1:<0.21642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21608.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21646.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:10] [ns_1@127.0.0.1:<0.21628.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:10] [ns_1@127.0.0.1:<0.21631.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:10] [ns_1@127.0.0.1:<0.21642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:10:11] [ns_1@127.0.0.1:<0.21473.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:11] [ns_1@127.0.0.1:<0.21647.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:11] [ns_1@127.0.0.1:<0.21642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:12] [ns_1@127.0.0.1:<0.21649.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:12] [ns_1@127.0.0.1:<0.21652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:12] [ns_1@127.0.0.1:<0.21642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:13] [ns_1@127.0.0.1:<0.21482.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:13] [ns_1@127.0.0.1:<0.21660.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:10:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21642.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:10:14] [ns_1@127.0.0.1:<0.21662.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:14] [ns_1@127.0.0.1:<0.21665.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21646.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21680.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:15] [ns_1@127.0.0.1:<0.21486.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:15] 
[ns_1@127.0.0.1:<0.21672.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:16] [ns_1@127.0.0.1:<0.21676.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:16] [ns_1@127.0.0.1:<0.21681.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:17] [ns_1@127.0.0.1:<0.21499.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:17] [ns_1@127.0.0.1:<0.21687.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:18] [ns_1@127.0.0.1:<0.21689.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:18] [ns_1@127.0.0.1:<0.21691.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:19] [ns_1@127.0.0.1:<0.21516.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:19] [ns_1@127.0.0.1:<0.21708.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:19] [ns_1@127.0.0.1:<0.21698.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21680.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21714.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:20] [ns_1@127.0.0.1:<0.21700.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:20] [ns_1@127.0.0.1:<0.21708.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:20] [ns_1@127.0.0.1:<0.21702.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:21] [ns_1@127.0.0.1:<0.21526.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:21] [ns_1@127.0.0.1:<0.21708.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:21] [ns_1@127.0.0.1:<0.21715.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:22] [ns_1@127.0.0.1:<0.21719.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:22] [ns_1@127.0.0.1:<0.21708.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:22] [ns_1@127.0.0.1:<0.21721.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:23] [ns_1@127.0.0.1:<0.21542.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:10:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21708.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:10:23] [ns_1@127.0.0.1:<0.21728.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:24] [ns_1@127.0.0.1:<0.21731.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21714.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:24] [ns_1@127.0.0.1:<0.21733.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:25] [ns_1@127.0.0.1:<0.21553.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:25] [ns_1@127.0.0.1:<0.21741.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:26] [ns_1@127.0.0.1:<0.21748.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:26] [ns_1@127.0.0.1:<0.21750.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:27] [ns_1@127.0.0.1:<0.21565.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:27] [ns_1@127.0.0.1:<0.21756.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:28] [ns_1@127.0.0.1:<0.21759.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
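The steady stream of "Some nodes didn't respond" lines is the per-bucket stats poller logging the nodes that failed to answer a stats request while the bucket's processes are absent on this node. A rough sketch of one common Erlang pattern for such a poll, using gen_server:multi_call/4; the module, registered-name scheme and timeout are assumptions, not the stats_reader source.

    %% stats_poll_sketch: collect per-node replies and log the nodes that
    %% had no running server (or did not answer within the timeout).
    -module(stats_poll_sketch).
    -export([latest/2]).

    latest(Nodes, Bucket) ->
        Name = list_to_atom("stats_reader-" ++ Bucket),   % assumed naming scheme
        {Replies, BadNodes} = gen_server:multi_call(Nodes, Name, latest, 5000),
        case BadNodes of
            []  -> ok;
            Bad -> error_logger:error_msg("Some nodes didn't respond: ~p~n", [Bad])
        end,
        Replies.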
[stats:error] [2012-03-26 1:10:28] [ns_1@127.0.0.1:<0.21761.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:29] [ns_1@127.0.0.1:<0.21583.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:29] [ns_1@127.0.0.1:<0.21777.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21782.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:29] [ns_1@127.0.0.1:<0.21767.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:30] [ns_1@127.0.0.1:<0.21769.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:30] [ns_1@127.0.0.1:<0.21777.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:31] [ns_1@127.0.0.1:<0.21771.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:31] [ns_1@127.0.0.1:<0.21596.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:31] [ns_1@127.0.0.1:<0.21777.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:32] [ns_1@127.0.0.1:<0.21785.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:32] [ns_1@127.0.0.1:<0.21788.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:32] [ns_1@127.0.0.1:<0.21777.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:33] [ns_1@127.0.0.1:<0.21790.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:33] [ns_1@127.0.0.1:<0.21611.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:10:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21777.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:10:34] [ns_1@127.0.0.1:<0.21797.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:34] [ns_1@127.0.0.1:<0.21801.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21782.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21815.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:35] [ns_1@127.0.0.1:<0.21803.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:35] [ns_1@127.0.0.1:<0.21622.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:36] [ns_1@127.0.0.1:<0.21812.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:36] [ns_1@127.0.0.1:<0.21816.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:37] [ns_1@127.0.0.1:<0.21818.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:37] [ns_1@127.0.0.1:<0.21636.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:38] [ns_1@127.0.0.1:<0.21825.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:38] [ns_1@127.0.0.1:<0.21827.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:39] [ns_1@127.0.0.1:<0.21829.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:39] [ns_1@127.0.0.1:<0.21654.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:39] [ns_1@127.0.0.1:<0.21847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21815.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21851.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:40] [ns_1@127.0.0.1:<0.21835.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:40] [ns_1@127.0.0.1:<0.21838.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:40] [ns_1@127.0.0.1:<0.21847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:41] [ns_1@127.0.0.1:<0.21843.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:41] [ns_1@127.0.0.1:<0.21667.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:41] [ns_1@127.0.0.1:<0.21847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:42] [ns_1@127.0.0.1:<0.21854.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:42] [ns_1@127.0.0.1:<0.21857.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:42] [ns_1@127.0.0.1:<0.21847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:43] [ns_1@127.0.0.1:<0.21859.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:43] [ns_1@127.0.0.1:<0.21683.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:10:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21847.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:10:44] [ns_1@127.0.0.1:<0.21867.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:44] [ns_1@127.0.0.1:<0.21870.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:44] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21851.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21885.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:45] [ns_1@127.0.0.1:<0.21872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:45] [ns_1@127.0.0.1:<0.21695.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:46] [ns_1@127.0.0.1:<0.21881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:46] [ns_1@127.0.0.1:<0.21886.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:47] [ns_1@127.0.0.1:<0.21888.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:47] [ns_1@127.0.0.1:<0.21711.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:49] [ns_1@127.0.0.1:<0.21907.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:10:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21885.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21911.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:10:50] [ns_1@127.0.0.1:<0.21907.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:51] [ns_1@127.0.0.1:<0.21907.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:52] [ns_1@127.0.0.1:<0.21907.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:10:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: 
ns_janitor:cleanup/2 pid: <0.21907.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:10:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21911.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21926.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:10:57] [ns_1@127.0.0.1:<0.21726.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:58] [ns_1@127.0.0.1:<0.21894.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:58] [ns_1@127.0.0.1:<0.21896.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:59] [ns_1@127.0.0.1:<0.21898.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:10:59] [ns_1@127.0.0.1:<0.21945.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:10:59] [ns_1@127.0.0.1:<0.21739.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:10:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749459,529309}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37128216}, {processes,10205432}, {processes_used,8579952}, {system,26922784}, {atom,1306681}, {atom_used,1284164}, {binary,452480}, {code,12859877}, {ets,1824296}]}, {system_stats, [{cpu_utilization_rate,25.43640897755611}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,481}, 
{memory_data,{4040077312,4008615936,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30848 kB\nBuffers: 54784 kB\nCached: 3531664 kB\nSwapCached: 0 kB\nActive: 303528 kB\nInactive: 3443096 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30848 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 12 kB\nWriteback: 0 kB\nAnonPages: 160172 kB\nMapped: 24856 kB\nSlab: 134428 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577416 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616423936}, {buffered_memory,56098816}, {free_memory,31588352}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{476265,0}}, {context_switches,{508699,0}}, {garbage_collection,{247541,264320442,0}}, {io,{{input,17046797},{output,15670975}}}, {reductions,{120253361,521387}}, {run_queue,0}, {runtime,{18760,130}}]}]}] [error_logger:error] [2012-03-26 1:10:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21926.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:10:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21952.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:00] [ns_1@127.0.0.1:<0.21936.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:00] [ns_1@127.0.0.1:<0.21945.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:00] [ns_1@127.0.0.1:<0.21938.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:01] [ns_1@127.0.0.1:<0.21930.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:01] [ns_1@127.0.0.1:<0.21945.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:01] [ns_1@127.0.0.1:<0.21754.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:02] [ns_1@127.0.0.1:<0.21956.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:02] [ns_1@127.0.0.1:<0.21945.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:02] [ns_1@127.0.0.1:<0.21958.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:03] 
[ns_1@127.0.0.1:<0.21948.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:11:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21945.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:11:03] [ns_1@127.0.0.1:<0.21765.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:04] [ns_1@127.0.0.1:<0.21969.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:04] [ns_1@127.0.0.1:<0.21971.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21952.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21985.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:05] [ns_1@127.0.0.1:<0.21963.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:05] [ns_1@127.0.0.1:<0.21783.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:06] [ns_1@127.0.0.1:<0.21982.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:06] [ns_1@127.0.0.1:<0.21986.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:07] [ns_1@127.0.0.1:<0.21976.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:07] [ns_1@127.0.0.1:<0.21795.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:08] [ns_1@127.0.0.1:<0.21995.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:08] 
[ns_1@127.0.0.1:<0.21997.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:09] [ns_1@127.0.0.1:<0.21991.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:09] [ns_1@127.0.0.1:<0.22017.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21985.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22021.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:09] [ns_1@127.0.0.1:<0.21808.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:10] [ns_1@127.0.0.1:<0.22006.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:10] [ns_1@127.0.0.1:<0.22017.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:10] [ns_1@127.0.0.1:<0.22008.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:11] [ns_1@127.0.0.1:<0.22001.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:11] [ns_1@127.0.0.1:<0.22017.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:11] [ns_1@127.0.0.1:<0.21823.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:12] [ns_1@127.0.0.1:<0.22027.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:12] [ns_1@127.0.0.1:<0.22017.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:12] [ns_1@127.0.0.1:<0.22029.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:13] [ns_1@127.0.0.1:<0.22022.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:11:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22017.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:11:13] [ns_1@127.0.0.1:<0.21833.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:14] [ns_1@127.0.0.1:<0.22040.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22021.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22055.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:15] [ns_1@127.0.0.1:<0.22042.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:15] [ns_1@127.0.0.1:<0.22035.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:16] [ns_1@127.0.0.1:<0.21852.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:16] [ns_1@127.0.0.1:<0.22056.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:17] [ns_1@127.0.0.1:<0.22058.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:17] [ns_1@127.0.0.1:<0.22047.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:18] [ns_1@127.0.0.1:<0.21865.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:18] [ns_1@127.0.0.1:<0.22066.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:19] [ns_1@127.0.0.1:<0.22068.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:19] [ns_1@127.0.0.1:<0.22062.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:19] [ns_1@127.0.0.1:<0.22085.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22055.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22089.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:20] [ns_1@127.0.0.1:<0.21877.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:20] [ns_1@127.0.0.1:<0.22077.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:20] [ns_1@127.0.0.1:<0.22085.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:21] [ns_1@127.0.0.1:<0.22081.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:21] [ns_1@127.0.0.1:<0.22073.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:21] [ns_1@127.0.0.1:<0.22085.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:22] [ns_1@127.0.0.1:<0.21892.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:22] [ns_1@127.0.0.1:<0.22096.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:22] [ns_1@127.0.0.1:<0.22085.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:23] [ns_1@127.0.0.1:<0.22098.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:23] [ns_1@127.0.0.1:<0.22090.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:11:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22085.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:11:24] [ns_1@127.0.0.1:<0.21934.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:24] [ns_1@127.0.0.1:<0.22108.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:24] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22089.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22124.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:25] [ns_1@127.0.0.1:<0.22110.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:25] [ns_1@127.0.0.1:<0.22103.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:26] [ns_1@127.0.0.1:<0.21953.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:26] [ns_1@127.0.0.1:<0.22125.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:27] [ns_1@127.0.0.1:<0.22127.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:27] [ns_1@127.0.0.1:<0.22116.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:28] [ns_1@127.0.0.1:<0.21965.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:28] [ns_1@127.0.0.1:<0.22136.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:29] [ns_1@127.0.0.1:<0.22138.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:29] [ns_1@127.0.0.1:<0.22131.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:29] [ns_1@127.0.0.1:<0.22154.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22124.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22159.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:30] [ns_1@127.0.0.1:<0.21978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:30] [ns_1@127.0.0.1:<0.22146.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 
1:11:30] [ns_1@127.0.0.1:<0.22154.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:31] [ns_1@127.0.0.1:<0.22150.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:31] [ns_1@127.0.0.1:<0.22142.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:31] [ns_1@127.0.0.1:<0.22154.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:32] [ns_1@127.0.0.1:<0.21993.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:32] [ns_1@127.0.0.1:<0.22165.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:32] [ns_1@127.0.0.1:<0.22154.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:33] [ns_1@127.0.0.1:<0.22167.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:33] [ns_1@127.0.0.1:<0.22160.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:11:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22154.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:11:34] [ns_1@127.0.0.1:<0.22003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:34] [ns_1@127.0.0.1:<0.22178.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22159.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22192.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:35] 
[ns_1@127.0.0.1:<0.22180.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:35] [ns_1@127.0.0.1:<0.22172.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:36] [ns_1@127.0.0.1:<0.22024.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:36] [ns_1@127.0.0.1:<0.22193.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:37] [ns_1@127.0.0.1:<0.22197.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:37] [ns_1@127.0.0.1:<0.22185.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:38] [ns_1@127.0.0.1:<0.22037.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:38] [ns_1@127.0.0.1:<0.22204.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:39] [ns_1@127.0.0.1:<0.22208.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:39] [ns_1@127.0.0.1:<0.22222.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:39] [ns_1@127.0.0.1:<0.22200.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22192.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22228.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:40] [ns_1@127.0.0.1:<0.22049.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:40] [ns_1@127.0.0.1:<0.22222.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:40] [ns_1@127.0.0.1:<0.22215.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:41] [ns_1@127.0.0.1:<0.22225.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:41] [ns_1@127.0.0.1:<0.22222.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:41] [ns_1@127.0.0.1:<0.22210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:42] [ns_1@127.0.0.1:<0.22064.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:42] [ns_1@127.0.0.1:<0.22222.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:42] [ns_1@127.0.0.1:<0.22234.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:43] [ns_1@127.0.0.1:<0.22240.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:11:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22222.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:11:43] [ns_1@127.0.0.1:<0.22229.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:44] [ns_1@127.0.0.1:<0.22075.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22228.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22260.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:44] [ns_1@127.0.0.1:<0.22247.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:45] [ns_1@127.0.0.1:<0.22252.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:45] [ns_1@127.0.0.1:<0.22242.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:46] [ns_1@127.0.0.1:<0.22092.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:46] [ns_1@127.0.0.1:<0.22263.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:47] [ns_1@127.0.0.1:<0.22267.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:47] [ns_1@127.0.0.1:<0.22254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:11:48] [ns_1@127.0.0.1:<0.22105.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:48] [ns_1@127.0.0.1:<0.22273.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:49] [ns_1@127.0.0.1:<0.22278.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:49] [ns_1@127.0.0.1:<0.22290.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22260.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22294.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:50] [ns_1@127.0.0.1:<0.22269.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:50] [ns_1@127.0.0.1:<0.22121.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:50] [ns_1@127.0.0.1:<0.22290.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:51] [ns_1@127.0.0.1:<0.22284.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:51] [ns_1@127.0.0.1:<0.22295.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:51] [ns_1@127.0.0.1:<0.22290.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:52] [ns_1@127.0.0.1:<0.22280.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:52] [ns_1@127.0.0.1:<0.22133.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:52] [ns_1@127.0.0.1:<0.22290.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:53] [ns_1@127.0.0.1:<0.22303.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:53] [ns_1@127.0.0.1:<0.22308.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:11:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22290.0> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:11:54] [ns_1@127.0.0.1:<0.22297.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:54] [ns_1@127.0.0.1:<0.22144.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:11:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22294.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22329.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:11:55] [ns_1@127.0.0.1:<0.22315.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:55] [ns_1@127.0.0.1:<0.22321.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:56] [ns_1@127.0.0.1:<0.22310.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:56] [ns_1@127.0.0.1:<0.22162.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:57] [ns_1@127.0.0.1:<0.22332.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:57] [ns_1@127.0.0.1:<0.22336.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:58] [ns_1@127.0.0.1:<0.22326.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:58] [ns_1@127.0.0.1:<0.22338.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:58] [ns_1@127.0.0.1:<0.22174.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:58] [ns_1@127.0.0.1:<0.22189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:59] [ns_1@127.0.0.1:<0.22343.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:11:59] [ns_1@127.0.0.1:<0.22347.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:11:59] [ns_1@127.0.0.1:<0.22379.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" 
on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:11:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749519,556235}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37260392}, {processes,10291848}, {processes_used,8666368}, {system,26968544}, {atom,1306681}, {atom_used,1284164}, {binary,447368}, {code,12859877}, {ets,1853272}]}, {system_stats, [{cpu_utilization_rate,25.12562814070352}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,541}, {memory_data,{4040077312,4008742912,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30600 kB\nBuffers: 54880 kB\nCached: 3531820 kB\nSwapCached: 0 kB\nActive: 303656 kB\nInactive: 3443228 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30600 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 12 kB\nWriteback: 0 kB\nAnonPages: 160296 kB\nMapped: 24856 kB\nSlab: 134424 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577416 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616583680}, {buffered_memory,56197120}, {free_memory,31334400}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{536292,0}}, {context_switches,{521201,0}}, {garbage_collection,{254719,274994272,0}}, {io,{{input,17077386},{output,16101346}}}, {reductions,{122775059,647273}}, {run_queue,0}, {runtime,{19320,150}}]}]}] [error_logger:error] [2012-03-26 1:11:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22329.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:11:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22384.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:00] [ns_1@127.0.0.1:<0.22353.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:00] 
[ns_1@127.0.0.1:<0.22202.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:00] [ns_1@127.0.0.1:<0.22379.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:01] [ns_1@127.0.0.1:<0.22374.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:01] [ns_1@127.0.0.1:<0.22349.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:01] [ns_1@127.0.0.1:<0.22379.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:02] [ns_1@127.0.0.1:<0.22387.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:02] [ns_1@127.0.0.1:<0.22213.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:02] [ns_1@127.0.0.1:<0.22379.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:03] [ns_1@127.0.0.1:<0.22392.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:03] [ns_1@127.0.0.1:<0.22351.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:12:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22379.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:12:04] [ns_1@127.0.0.1:<0.22399.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:04] [ns_1@127.0.0.1:<0.22232.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22384.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22417.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:05] [ns_1@127.0.0.1:<0.22405.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:05] [ns_1@127.0.0.1:<0.22385.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:06] [ns_1@127.0.0.1:<0.22414.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:06] [ns_1@127.0.0.1:<0.22245.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:07] [ns_1@127.0.0.1:<0.22420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:07] [ns_1@127.0.0.1:<0.22397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:08] [ns_1@127.0.0.1:<0.22427.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:08] [ns_1@127.0.0.1:<0.22261.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:09] [ns_1@127.0.0.1:<0.22431.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:09] [ns_1@127.0.0.1:<0.22449.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:09] [ns_1@127.0.0.1:<0.22410.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22417.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22455.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:10] [ns_1@127.0.0.1:<0.22437.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:10] [ns_1@127.0.0.1:<0.22449.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:10] [ns_1@127.0.0.1:<0.22271.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:11] [ns_1@127.0.0.1:<0.22452.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:11] [ns_1@127.0.0.1:<0.22449.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:11] [ns_1@127.0.0.1:<0.22425.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:12] [ns_1@127.0.0.1:<0.22459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:12] [ns_1@127.0.0.1:<0.22449.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:12] [ns_1@127.0.0.1:<0.22282.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:13] [ns_1@127.0.0.1:<0.22467.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:12:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22449.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:12:13] [ns_1@127.0.0.1:<0.22435.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:14] [ns_1@127.0.0.1:<0.22472.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:14] [ns_1@127.0.0.1:<0.22301.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22455.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22489.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:15] [ns_1@127.0.0.1:<0.22479.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:15] [ns_1@127.0.0.1:<0.22456.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:16] [ns_1@127.0.0.1:<0.22486.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:16] [ns_1@127.0.0.1:<0.22313.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:17] [ns_1@127.0.0.1:<0.22494.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:12:17] [ns_1@127.0.0.1:<0.22469.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:18] [ns_1@127.0.0.1:<0.22498.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:18] [ns_1@127.0.0.1:<0.22330.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:19] [ns_1@127.0.0.1:<0.22505.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:19] [ns_1@127.0.0.1:<0.22517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22489.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22521.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:19] [ns_1@127.0.0.1:<0.22481.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:20] [ns_1@127.0.0.1:<0.22509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:20] [ns_1@127.0.0.1:<0.22517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:20] [ns_1@127.0.0.1:<0.22341.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:21] [ns_1@127.0.0.1:<0.22522.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:21] [ns_1@127.0.0.1:<0.22517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:21] [ns_1@127.0.0.1:<0.22496.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:22] [ns_1@127.0.0.1:<0.22528.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:22] [ns_1@127.0.0.1:<0.22517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:22] [ns_1@127.0.0.1:<0.22355.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:23] [ns_1@127.0.0.1:<0.22535.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:12:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22517.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:12:24] [ns_1@127.0.0.1:<0.22507.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:24] [ns_1@127.0.0.1:<0.22540.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22521.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22556.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:25] [ns_1@127.0.0.1:<0.22390.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:25] [ns_1@127.0.0.1:<0.22548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:26] [ns_1@127.0.0.1:<0.22524.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:26] [ns_1@127.0.0.1:<0.22557.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:27] [ns_1@127.0.0.1:<0.22403.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:27] [ns_1@127.0.0.1:<0.22563.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:28] [ns_1@127.0.0.1:<0.22537.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:28] [ns_1@127.0.0.1:<0.22568.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:29] [ns_1@127.0.0.1:<0.22418.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:29] [ns_1@127.0.0.1:<0.22574.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:29] [ns_1@127.0.0.1:<0.22586.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:29] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22556.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22591.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:30] [ns_1@127.0.0.1:<0.22550.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:30] [ns_1@127.0.0.1:<0.22578.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:30] [ns_1@127.0.0.1:<0.22586.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:31] [ns_1@127.0.0.1:<0.22429.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:31] [ns_1@127.0.0.1:<0.22592.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:31] [ns_1@127.0.0.1:<0.22586.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:32] [ns_1@127.0.0.1:<0.22565.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:32] [ns_1@127.0.0.1:<0.22597.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:32] [ns_1@127.0.0.1:<0.22586.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:33] [ns_1@127.0.0.1:<0.22440.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:33] [ns_1@127.0.0.1:<0.22604.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:12:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22586.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:12:34] 
[ns_1@127.0.0.1:<0.22576.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:34] [ns_1@127.0.0.1:<0.22610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22591.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22624.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:35] [ns_1@127.0.0.1:<0.22461.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:35] [ns_1@127.0.0.1:<0.22617.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:36] [ns_1@127.0.0.1:<0.22594.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:36] [ns_1@127.0.0.1:<0.22625.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:37] [ns_1@127.0.0.1:<0.22474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:37] [ns_1@127.0.0.1:<0.22632.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:38] [ns_1@127.0.0.1:<0.22606.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:38] [ns_1@127.0.0.1:<0.22636.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:39] [ns_1@127.0.0.1:<0.22490.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:39] [ns_1@127.0.0.1:<0.22642.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:39] [ns_1@127.0.0.1:<0.22656.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22624.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22660.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 1:12:40] [ns_1@127.0.0.1:<0.22621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:40] [ns_1@127.0.0.1:<0.22647.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:40] [ns_1@127.0.0.1:<0.22656.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:41] [ns_1@127.0.0.1:<0.22500.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:41] [ns_1@127.0.0.1:<0.22661.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:41] [ns_1@127.0.0.1:<0.22656.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:42] [ns_1@127.0.0.1:<0.22634.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:42] [ns_1@127.0.0.1:<0.22666.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:42] [ns_1@127.0.0.1:<0.22656.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:43] [ns_1@127.0.0.1:<0.22511.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:12:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22656.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:12:43] [ns_1@127.0.0.1:<0.22674.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:44] [ns_1@127.0.0.1:<0.22644.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:44] [ns_1@127.0.0.1:<0.22679.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22660.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22694.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:45] [ns_1@127.0.0.1:<0.22530.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:45] [ns_1@127.0.0.1:<0.22686.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:46] [ns_1@127.0.0.1:<0.22663.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:46] [ns_1@127.0.0.1:<0.22695.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:47] [ns_1@127.0.0.1:<0.22542.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:47] [ns_1@127.0.0.1:<0.22701.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:48] [ns_1@127.0.0.1:<0.22676.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:48] [ns_1@127.0.0.1:<0.22705.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:49] [ns_1@127.0.0.1:<0.22559.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:49] [ns_1@127.0.0.1:<0.22722.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:49] [ns_1@127.0.0.1:<0.22712.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22694.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22728.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:50] [ns_1@127.0.0.1:<0.22690.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:50] [ns_1@127.0.0.1:<0.22722.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:50] [ns_1@127.0.0.1:<0.22716.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:51] [ns_1@127.0.0.1:<0.22570.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:51] [ns_1@127.0.0.1:<0.22722.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:51] [ns_1@127.0.0.1:<0.22729.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:52] [ns_1@127.0.0.1:<0.22703.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:52] [ns_1@127.0.0.1:<0.22722.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:52] [ns_1@127.0.0.1:<0.22735.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:53] [ns_1@127.0.0.1:<0.22582.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:12:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22722.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:12:53] [ns_1@127.0.0.1:<0.22742.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:54] [ns_1@127.0.0.1:<0.22714.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:12:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22728.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22761.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:12:54] [ns_1@127.0.0.1:<0.22747.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:55] [ns_1@127.0.0.1:<0.22599.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:55] [ns_1@127.0.0.1:<0.22755.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:56] [ns_1@127.0.0.1:<0.22733.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:56] [ns_1@127.0.0.1:<0.22764.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:12:57] [ns_1@127.0.0.1:<0.22612.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:57] [ns_1@127.0.0.1:<0.22770.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:58] [ns_1@127.0.0.1:<0.22745.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:58] [ns_1@127.0.0.1:<0.22762.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:58] [ns_1@127.0.0.1:<0.22773.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:58] [ns_1@127.0.0.1:<0.22775.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:59] [ns_1@127.0.0.1:<0.22627.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:12:59] [ns_1@127.0.0.1:<0.22638.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:12:59] [ns_1@127.0.0.1:<0.22798.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:12:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749579,583753}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37148736}, {processes,10131568}, {processes_used,8506088}, {system,27017168}, {atom,1306681}, {atom_used,1284164}, {binary,448208}, {code,12859877}, {ets,1884400}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,601}, {memory_data,{4040077312,4009250816,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30104 kB\nBuffers: 55040 kB\nCached: 3531964 kB\nSwapCached: 0 kB\nActive: 303592 kB\nInactive: 3443608 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30104 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 160192 kB\nMapped: 24856 kB\nSlab: 134416 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577416 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616731136}, {buffered_memory,56360960}, {free_memory,30826496}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, 
{index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{596320,0}}, {context_switches,{534478,0}}, {garbage_collection,{262091,286399460,0}}, {io,{{input,17354001},{output,16767264}}}, {reductions,{125434683,670593}}, {run_queue,0}, {runtime,{19850,130}}]}]}] [error_logger:error] [2012-03-26 1:12:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22761.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:12:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22803.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:00] [ns_1@127.0.0.1:<0.22781.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:00] [ns_1@127.0.0.1:<0.22789.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:00] [ns_1@127.0.0.1:<0.22798.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:01] [ns_1@127.0.0.1:<0.22791.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:01] [ns_1@127.0.0.1:<0.22652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:01] [ns_1@127.0.0.1:<0.22798.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:02] [ns_1@127.0.0.1:<0.22783.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:02] [ns_1@127.0.0.1:<0.22809.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:02] [ns_1@127.0.0.1:<0.22798.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:03] [ns_1@127.0.0.1:<0.22811.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:03] [ns_1@127.0.0.1:<0.22668.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:13:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22798.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 
ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:13:04] [ns_1@127.0.0.1:<0.22785.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:04] [ns_1@127.0.0.1:<0.22822.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22803.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22836.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:05] [ns_1@127.0.0.1:<0.22824.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:05] [ns_1@127.0.0.1:<0.22684.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:06] [ns_1@127.0.0.1:<0.22787.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:06] [ns_1@127.0.0.1:<0.22837.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:07] [ns_1@127.0.0.1:<0.22839.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:07] [ns_1@127.0.0.1:<0.22699.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:08] [ns_1@127.0.0.1:<0.22806.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:08] [ns_1@127.0.0.1:<0.22848.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:09] [ns_1@127.0.0.1:<0.22850.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:09] [ns_1@127.0.0.1:<0.22710.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:09] [ns_1@127.0.0.1:<0.22870.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22836.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:09] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22874.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:10] [ns_1@127.0.0.1:<0.22818.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:10] [ns_1@127.0.0.1:<0.22859.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:10] [ns_1@127.0.0.1:<0.22870.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:11] [ns_1@127.0.0.1:<0.22864.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:11] [ns_1@127.0.0.1:<0.22725.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:11] [ns_1@127.0.0.1:<0.22870.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:12] [ns_1@127.0.0.1:<0.22833.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:12] [ns_1@127.0.0.1:<0.22880.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:12] [ns_1@127.0.0.1:<0.22870.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:13] [ns_1@127.0.0.1:<0.22882.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:13] [ns_1@127.0.0.1:<0.22740.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:13:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22870.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:13:14] [ns_1@127.0.0.1:<0.22846.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:14] [ns_1@127.0.0.1:<0.22893.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22874.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22908.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:15] [ns_1@127.0.0.1:<0.22895.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:15] [ns_1@127.0.0.1:<0.22753.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:16] [ns_1@127.0.0.1:<0.22856.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:16] [ns_1@127.0.0.1:<0.22909.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:17] [ns_1@127.0.0.1:<0.22911.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:17] [ns_1@127.0.0.1:<0.22768.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:18] [ns_1@127.0.0.1:<0.22877.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:18] [ns_1@127.0.0.1:<0.22919.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:19] [ns_1@127.0.0.1:<0.22923.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:19] [ns_1@127.0.0.1:<0.22936.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:19] [ns_1@127.0.0.1:<0.22779.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22908.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22942.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:20] [ns_1@127.0.0.1:<0.22890.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:20] [ns_1@127.0.0.1:<0.22936.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:20] [ns_1@127.0.0.1:<0.22930.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:21] 
[ns_1@127.0.0.1:<0.22939.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:21] [ns_1@127.0.0.1:<0.22936.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:21] [ns_1@127.0.0.1:<0.22804.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:22] [ns_1@127.0.0.1:<0.22904.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:22] [ns_1@127.0.0.1:<0.22936.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:22] [ns_1@127.0.0.1:<0.22949.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:23] [ns_1@127.0.0.1:<0.22954.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:13:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22936.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:13:23] [ns_1@127.0.0.1:<0.22816.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:24] [ns_1@127.0.0.1:<0.22917.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:24] [ns_1@127.0.0.1:<0.22961.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22942.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22977.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:25] [ns_1@127.0.0.1:<0.22967.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:25] 
[ns_1@127.0.0.1:<0.22829.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:26] [ns_1@127.0.0.1:<0.22928.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:26] [ns_1@127.0.0.1:<0.22978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:27] [ns_1@127.0.0.1:<0.22982.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:27] [ns_1@127.0.0.1:<0.22844.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:28] [ns_1@127.0.0.1:<0.22947.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:28] [ns_1@127.0.0.1:<0.22989.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:29] [ns_1@127.0.0.1:<0.22993.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:29] [ns_1@127.0.0.1:<0.23005.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22977.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23010.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:29] [ns_1@127.0.0.1:<0.22854.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:30] [ns_1@127.0.0.1:<0.22959.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:30] [ns_1@127.0.0.1:<0.23005.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:30] [ns_1@127.0.0.1:<0.22999.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:31] [ns_1@127.0.0.1:<0.23011.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:31] [ns_1@127.0.0.1:<0.23005.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:31] [ns_1@127.0.0.1:<0.22875.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:32] [ns_1@127.0.0.1:<0.22974.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:32] [ns_1@127.0.0.1:<0.23005.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:33] [ns_1@127.0.0.1:<0.23018.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:33] [ns_1@127.0.0.1:<0.23023.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:13:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23005.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:13:34] [ns_1@127.0.0.1:<0.22888.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:34] [ns_1@127.0.0.1:<0.22987.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23010.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23043.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:35] [ns_1@127.0.0.1:<0.23031.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:35] [ns_1@127.0.0.1:<0.23036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:36] [ns_1@127.0.0.1:<0.22900.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:36] [ns_1@127.0.0.1:<0.22997.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:37] [ns_1@127.0.0.1:<0.23046.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:37] [ns_1@127.0.0.1:<0.23051.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:38] [ns_1@127.0.0.1:<0.22915.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:38] [ns_1@127.0.0.1:<0.23016.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:13:39] [ns_1@127.0.0.1:<0.23057.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:39] [ns_1@127.0.0.1:<0.23061.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:39] [ns_1@127.0.0.1:<0.23075.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23043.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23079.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:40] [ns_1@127.0.0.1:<0.22926.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:40] [ns_1@127.0.0.1:<0.23029.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:40] [ns_1@127.0.0.1:<0.23075.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:41] [ns_1@127.0.0.1:<0.23068.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:41] [ns_1@127.0.0.1:<0.23080.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:41] [ns_1@127.0.0.1:<0.23075.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:42] [ns_1@127.0.0.1:<0.22943.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:42] [ns_1@127.0.0.1:<0.23044.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:42] [ns_1@127.0.0.1:<0.23075.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:43] [ns_1@127.0.0.1:<0.23087.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:43] [ns_1@127.0.0.1:<0.23093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:13:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23075.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:13:44] [ns_1@127.0.0.1:<0.22956.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:44] [ns_1@127.0.0.1:<0.23055.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23079.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23113.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:45] [ns_1@127.0.0.1:<0.23100.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:45] [ns_1@127.0.0.1:<0.23105.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:46] [ns_1@127.0.0.1:<0.22969.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:46] [ns_1@127.0.0.1:<0.23066.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:47] [ns_1@127.0.0.1:<0.23116.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:47] [ns_1@127.0.0.1:<0.23120.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:48] [ns_1@127.0.0.1:<0.22984.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:48] [ns_1@127.0.0.1:<0.23085.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:49] [ns_1@127.0.0.1:<0.23126.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:49] [ns_1@127.0.0.1:<0.23131.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:49] [ns_1@127.0.0.1:<0.23143.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23113.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23147.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:50] [ns_1@127.0.0.1:<0.22995.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:50] [ns_1@127.0.0.1:<0.23098.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:50] [ns_1@127.0.0.1:<0.23143.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:51] [ns_1@127.0.0.1:<0.23139.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:51] [ns_1@127.0.0.1:<0.23143.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:51] [ns_1@127.0.0.1:<0.23148.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:52] [ns_1@127.0.0.1:<0.23013.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:52] [ns_1@127.0.0.1:<0.23143.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:52] [ns_1@127.0.0.1:<0.23114.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:53] [ns_1@127.0.0.1:<0.23159.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:13:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23143.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:13:53] [ns_1@127.0.0.1:<0.23161.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:54] [ns_1@127.0.0.1:<0.23025.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:54] [ns_1@127.0.0.1:<0.23124.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:13:54] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23147.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:13:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23182.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:13:55] [ns_1@127.0.0.1:<0.23171.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:55] [ns_1@127.0.0.1:<0.23174.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:56] [ns_1@127.0.0.1:<0.23038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:56] [ns_1@127.0.0.1:<0.23135.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:57] [ns_1@127.0.0.1:<0.23187.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:57] [ns_1@127.0.0.1:<0.23189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:58] [ns_1@127.0.0.1:<0.23053.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:58] [ns_1@127.0.0.1:<0.23154.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:58] [ns_1@127.0.0.1:<0.23166.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:13:58] [ns_1@127.0.0.1:<0.23183.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:13:58] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:13:58] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:13:58] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:13:58] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:13:58] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_doctor:info] [2012-03-26 1:13:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749639,615272}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37414208}, {processes,10426792}, {processes_used,8804232}, {system,26987416}, {atom,1306681}, {atom_used,1284164}, {binary,448928}, {code,12859877}, {ets,1836944}]}, {system_stats, 
[{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,661}, {memory_data,{4040077312,4009250816,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 30104 kB\nBuffers: 55136 kB\nCached: 3532128 kB\nSwapCached: 0 kB\nActive: 303680 kB\nInactive: 3443780 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 30104 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 160196 kB\nMapped: 24856 kB\nSlab: 134420 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577416 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616899072}, {buffered_memory,56459264}, {free_memory,30826496}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{656351,0}}, {context_switches,{547374,0}}, {garbage_collection,{269373,297621700,0}}, {io,{{input,17384608},{output,17215100}}}, {reductions,{128054875,649556}}, {run_queue,0}, {runtime,{20480,160}}]}]}] [ns_server:warn] [2012-03-26 1:14:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
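The "Deleted bucket \"default\"" and "Created bucket \"default\" of type: membase" entries just below come from menelaus_web_buckets serving REST requests on port 8091. The following is a reproduction sketch under stated assumptions only: the standard /pools/default/buckets endpoints, placeholder Administrator/password credentials (no credentials appear in this log), and ramQuotaMB=2054, which corresponds to the ram_quota of 2153775104 bytes recorded in the config change below.

-module(bucket_cycle).
-export([run/0]).

%% Hypothetical helper; the credentials are placeholders.
auth() ->
    {"Authorization",
     "Basic " ++ base64:encode_to_string("Administrator:password")}.

run() ->
    inets:start(),
    Base = "http://127.0.0.1:8091/pools/default/buckets",
    %% DELETE /pools/default/buckets/default
    %%   -> menelaus_web_buckets:handle_bucket_delete ("Deleted bucket ...")
    {ok, _} = httpc:request(delete, {Base ++ "/default", [auth()]}, [], []),
    %% POST /pools/default/buckets
    %%   -> menelaus_web_buckets:do_bucket_create ("Created bucket ... membase")
    Body = "name=default&bucketType=membase&authType=sasl&saslPassword="
           "&replicaNumber=1&ramQuotaMB=2054",
    {ok, _} = httpc:request(post,
                            {Base, [auth()],
                             "application/x-www-form-urlencoded", Body},
                            [], []).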
[ns_server:info] [2012-03-26 1:14:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:14:03] [ns_1@127.0.0.1:<0.23194.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [ns_server:info] [2012-03-26 1:14:03] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:11("Deleted bucket \"default\"\n") because it's been seen 1 times in the past 271.705084 secs (last seen 271.705084 secs ago [menelaus:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:<0.23150.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_log:ns_log:handle_cast:115] suppressing duplicate log menelaus_web:12("Created bucket \"default\" of type: membase\n") because it's been seen 1 times in the past 256.694776 secs (last seen 256.694776 secs ago [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:<0.23244.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:14:04] [ns_1@127.0.0.1:<0.23164.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:14:06: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: 
"clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 1:14:04] [ns_1@127.0.0.1:<0.23179.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23182.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23259.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:14:05] [ns_1@127.0.0.1:<0.23244.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:05] [ns_1@127.0.0.1:<0.23198.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:05] [ns_1@127.0.0.1:<0.23229.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:06] [ns_1@127.0.0.1:<0.23244.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:06] [ns_1@127.0.0.1:<0.23254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:06] [ns_1@127.0.0.1:<0.23192.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:07] [ns_1@127.0.0.1:<0.23244.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:07] [ns_1@127.0.0.1:<0.23263.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:07] [ns_1@127.0.0.1:<0.23230.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:08] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:14:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23244.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from 
ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:14:08] [ns_1@127.0.0.1:<0.23271.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:08] [ns_1@127.0.0.1:<0.23202.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:09] [ns_1@127.0.0.1:<0.23276.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:09] [ns_1@127.0.0.1:<0.23233.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:09] [ns_1@127.0.0.1:<0.23297.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23259.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23301.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:10] [ns_1@127.0.0.1:<0.23283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:10] [ns_1@127.0.0.1:<0.23204.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:10] [ns_1@127.0.0.1:<0.23297.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:11] [ns_1@127.0.0.1:<0.23291.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:11] [ns_1@127.0.0.1:<0.23236.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:11] [ns_1@127.0.0.1:<0.23297.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:12] [ns_1@127.0.0.1:<0.23304.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:12] [ns_1@127.0.0.1:<0.23206.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:12] [ns_1@127.0.0.1:<0.23297.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:13] [ns_1@127.0.0.1:<0.23309.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:13] [ns_1@127.0.0.1:<0.23237.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:13] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:14:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23297.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:14:14] [ns_1@127.0.0.1:<0.23317.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:14] [ns_1@127.0.0.1:<0.23208.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23301.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23335.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:15] [ns_1@127.0.0.1:<0.23322.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:15] [ns_1@127.0.0.1:<0.23238.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:16] [ns_1@127.0.0.1:<0.23331.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:16] [ns_1@127.0.0.1:<0.23261.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:17] [ns_1@127.0.0.1:<0.23338.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:17] [ns_1@127.0.0.1:<0.23239.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:18] [ns_1@127.0.0.1:<0.23344.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:18] [ns_1@127.0.0.1:<0.23274.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:19] [ns_1@127.0.0.1:<0.23348.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:19] 
[ns_1@127.0.0.1:<0.23240.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:19] [ns_1@127.0.0.1:<0.23365.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23335.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23369.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:20] [ns_1@127.0.0.1:<0.23355.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:20] [ns_1@127.0.0.1:<0.23365.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:20] [ns_1@127.0.0.1:<0.23286.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:21] [ns_1@127.0.0.1:<0.23361.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:21] [ns_1@127.0.0.1:<0.23365.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:21] [ns_1@127.0.0.1:<0.23246.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:22] [ns_1@127.0.0.1:<0.23373.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:22] [ns_1@127.0.0.1:<0.23365.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:22] [ns_1@127.0.0.1:<0.23307.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:23] [ns_1@127.0.0.1:<0.23381.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:14:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23365.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] 
trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:14:23] [ns_1@127.0.0.1:<0.23269.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:24] [ns_1@127.0.0.1:<0.23386.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:24] [ns_1@127.0.0.1:<0.23320.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23369.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23404.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:25] [ns_1@127.0.0.1:<0.23394.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:25] [ns_1@127.0.0.1:<0.23281.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:26] [ns_1@127.0.0.1:<0.23401.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:26] [ns_1@127.0.0.1:<0.23336.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:27] [ns_1@127.0.0.1:<0.23409.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:27] [ns_1@127.0.0.1:<0.23302.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:28] [ns_1@127.0.0.1:<0.23414.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:28] [ns_1@127.0.0.1:<0.23346.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:29] [ns_1@127.0.0.1:<0.23420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:29] [ns_1@127.0.0.1:<0.23432.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23404.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23437.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:29] [ns_1@127.0.0.1:<0.23315.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:30] [ns_1@127.0.0.1:<0.23424.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:30] [ns_1@127.0.0.1:<0.23432.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:30] [ns_1@127.0.0.1:<0.23357.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:31] [ns_1@127.0.0.1:<0.23438.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:31] [ns_1@127.0.0.1:<0.23432.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:31] [ns_1@127.0.0.1:<0.23327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:32] [ns_1@127.0.0.1:<0.23443.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:32] [ns_1@127.0.0.1:<0.23432.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:32] [ns_1@127.0.0.1:<0.23376.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:33] [ns_1@127.0.0.1:<0.23450.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:14:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23432.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:14:33] [ns_1@127.0.0.1:<0.23342.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:34] [ns_1@127.0.0.1:<0.23456.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23437.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 1:14:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23470.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:34] [ns_1@127.0.0.1:<0.23388.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:35] [ns_1@127.0.0.1:<0.23463.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:36] [ns_1@127.0.0.1:<0.23353.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:36] [ns_1@127.0.0.1:<0.23471.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:37] [ns_1@127.0.0.1:<0.23405.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:37] [ns_1@127.0.0.1:<0.23478.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:38] [ns_1@127.0.0.1:<0.23370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:38] [ns_1@127.0.0.1:<0.23482.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:39] [ns_1@127.0.0.1:<0.23416.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:39] [ns_1@127.0.0.1:<0.23488.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:39] [ns_1@127.0.0.1:<0.23502.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23470.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23506.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:40] [ns_1@127.0.0.1:<0.23383.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:40] [ns_1@127.0.0.1:<0.23493.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:40] [ns_1@127.0.0.1:<0.23502.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:41] [ns_1@127.0.0.1:<0.23426.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:41] [ns_1@127.0.0.1:<0.23507.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
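On the moxi "bad JSON configuration" error logged above at 01:14:04: the config streamed from /pools/default/saslBucketsStreaming still carries "vBucketMap": [] while the newly created bucket is warming up, so the vBucket count is 0 and fails the "power of two > 0" test named in the message. An illustrative Erlang sketch of that kind of check (moxi's real check lives in C, and the actual MAX_BUCKETS value is not shown in this log):

-module(vbucket_check).
-export([valid_vbucket_count/2]).

%% "Number of buckets must be a power of two > 0 and <= MAX_BUCKETS":
%% an empty "vBucketMap" gives N = 0 and fails the "> 0" part; the
%% num_vbuckets of 256 configured in this log passes once the map is populated.
valid_vbucket_count(N, MaxBuckets)
  when is_integer(N), N > 0, N =< MaxBuckets ->
    N band (N - 1) =:= 0;
valid_vbucket_count(_, _) ->
    false.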
[ns_server:info] [2012-03-26 1:14:41] [ns_1@127.0.0.1:<0.23502.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:42] [ns_1@127.0.0.1:<0.23396.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:42] [ns_1@127.0.0.1:<0.23512.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:42] [ns_1@127.0.0.1:<0.23502.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:43] [ns_1@127.0.0.1:<0.23445.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:43] [ns_1@127.0.0.1:<0.23520.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:14:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23502.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:14:44] [ns_1@127.0.0.1:<0.23411.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:44] [ns_1@127.0.0.1:<0.23525.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23506.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23540.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:45] [ns_1@127.0.0.1:<0.23458.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:45] [ns_1@127.0.0.1:<0.23532.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:46] [ns_1@127.0.0.1:<0.23422.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:14:46] [ns_1@127.0.0.1:<0.23541.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:47] [ns_1@127.0.0.1:<0.23473.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:47] [ns_1@127.0.0.1:<0.23547.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:48] [ns_1@127.0.0.1:<0.23440.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:48] [ns_1@127.0.0.1:<0.23551.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:49] [ns_1@127.0.0.1:<0.23484.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:49] [ns_1@127.0.0.1:<0.23558.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:49] [ns_1@127.0.0.1:<0.23570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23540.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23574.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:50] [ns_1@127.0.0.1:<0.23452.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:50] [ns_1@127.0.0.1:<0.23562.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:50] [ns_1@127.0.0.1:<0.23570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:51] [ns_1@127.0.0.1:<0.23495.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:51] [ns_1@127.0.0.1:<0.23575.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:51] [ns_1@127.0.0.1:<0.23570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:52] [ns_1@127.0.0.1:<0.23465.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:52] [ns_1@127.0.0.1:<0.23581.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:52] [ns_1@127.0.0.1:<0.23570.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:53] [ns_1@127.0.0.1:<0.23514.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:53] [ns_1@127.0.0.1:<0.23588.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:14:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23570.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:14:54] [ns_1@127.0.0.1:<0.23480.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:54] [ns_1@127.0.0.1:<0.23593.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23574.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23609.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:14:55] [ns_1@127.0.0.1:<0.23527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:55] [ns_1@127.0.0.1:<0.23601.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:56] [ns_1@127.0.0.1:<0.23490.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:56] [ns_1@127.0.0.1:<0.23610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:57] [ns_1@127.0.0.1:<0.23543.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:57] [ns_1@127.0.0.1:<0.23616.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:58] [ns_1@127.0.0.1:<0.23509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:58] [ns_1@127.0.0.1:<0.23621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:14:59] [ns_1@127.0.0.1:<0.23553.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:14:59] [ns_1@127.0.0.1:<0.23651.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:14:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749699,640209}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37406896}, {processes,10377752}, {processes_used,8752272}, {system,27029144}, {atom,1306681}, {atom_used,1284164}, {binary,444496}, {code,12859877}, {ets,1867928}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,721}, {memory_data,{4040077312,4010266624,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 29112 kB\nBuffers: 55256 kB\nCached: 3532288 kB\nSwapCached: 0 kB\nActive: 304188 kB\nInactive: 3443892 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 29112 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 8 kB\nWriteback: 0 kB\nAnonPages: 160448 kB\nMapped: 24856 kB\nSlab: 134428 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617062912}, {buffered_memory,56582144}, {free_memory,29810688}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{716376,0}}, {context_switches,{560642,0}}, {garbage_collection,{276887,309398303,0}}, {io,{{input,17418346},{output,17671866}}}, {reductions,{130744164,660376}}, {run_queue,0}, {runtime,{21120,160}}]}]}] [stats:error] [2012-03-26 1:14:59] [ns_1@127.0.0.1:<0.23627.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:14:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23609.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:14:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23658.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:00] [ns_1@127.0.0.1:<0.23522.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:00] [ns_1@127.0.0.1:<0.23651.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:00] [ns_1@127.0.0.1:<0.23631.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:01] [ns_1@127.0.0.1:<0.23566.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:01] [ns_1@127.0.0.1:<0.23651.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:01] [ns_1@127.0.0.1:<0.23659.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:02] [ns_1@127.0.0.1:<0.23534.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:02] [ns_1@127.0.0.1:<0.23651.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:02] [ns_1@127.0.0.1:<0.23664.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:03] [ns_1@127.0.0.1:<0.23583.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:15:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23651.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:15:03] [ns_1@127.0.0.1:<0.23671.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:04] [ns_1@127.0.0.1:<0.23549.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23658.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,menelaus_sup} started: [{pid,<0.23689.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:15:09] [ns_1@127.0.0.1:<0.23703.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23689.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23707.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:15:10] [ns_1@127.0.0.1:<0.23703.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:11] [ns_1@127.0.0.1:<0.23703.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:12] [ns_1@127.0.0.1:<0.23703.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:15:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23703.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:15:14] [ns_1@127.0.0.1:<0.23560.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23707.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.23723.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:14] [ns_1@127.0.0.1:<0.23677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:15] [ns_1@127.0.0.1:<0.23595.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:15] [ns_1@127.0.0.1:<0.23684.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:16] [ns_1@127.0.0.1:<0.23577.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:16] [ns_1@127.0.0.1:<0.23726.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:17] [ns_1@127.0.0.1:<0.23614.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:17] [ns_1@127.0.0.1:<0.23717.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:18] [ns_1@127.0.0.1:<0.23590.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:19] [ns_1@127.0.0.1:<0.23736.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:19] [ns_1@127.0.0.1:<0.23625.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:19] [ns_1@127.0.0.1:<0.23753.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23723.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23757.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:20] [ns_1@127.0.0.1:<0.23732.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:20] [ns_1@127.0.0.1:<0.23606.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:20] [ns_1@127.0.0.1:<0.23753.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:21] [ns_1@127.0.0.1:<0.23747.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:21] [ns_1@127.0.0.1:<0.23655.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:21] [ns_1@127.0.0.1:<0.23753.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:22] [ns_1@127.0.0.1:<0.23743.0>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:22] [ns_1@127.0.0.1:<0.23618.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:22] [ns_1@127.0.0.1:<0.23753.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:23] [ns_1@127.0.0.1:<0.23766.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:23] [ns_1@127.0.0.1:<0.23669.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:15:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23753.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:15:24] [ns_1@127.0.0.1:<0.23760.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:24] [ns_1@127.0.0.1:<0.23629.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23757.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23792.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:25] [ns_1@127.0.0.1:<0.23778.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:25] [ns_1@127.0.0.1:<0.23682.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:26] [ns_1@127.0.0.1:<0.23773.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:26] [ns_1@127.0.0.1:<0.23662.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:27] [ns_1@127.0.0.1:<0.23795.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:27] [ns_1@127.0.0.1:<0.23730.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:28] [ns_1@127.0.0.1:<0.23786.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:28] [ns_1@127.0.0.1:<0.23675.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:29] [ns_1@127.0.0.1:<0.23806.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:29] [ns_1@127.0.0.1:<0.23741.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:29] [ns_1@127.0.0.1:<0.23822.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23792.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23827.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:30] [ns_1@127.0.0.1:<0.23801.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:30] [ns_1@127.0.0.1:<0.23724.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:30] [ns_1@127.0.0.1:<0.23822.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:31] [ns_1@127.0.0.1:<0.23816.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:31] [ns_1@127.0.0.1:<0.23758.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:31] [ns_1@127.0.0.1:<0.23822.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:32] [ns_1@127.0.0.1:<0.23812.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:32] [ns_1@127.0.0.1:<0.23734.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:32] [ns_1@127.0.0.1:<0.23822.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:33] [ns_1@127.0.0.1:<0.23835.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:33] [ns_1@127.0.0.1:<0.23771.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 
'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:15:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23822.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:15:34] [ns_1@127.0.0.1:<0.23830.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:34] [ns_1@127.0.0.1:<0.23745.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23827.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23860.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:35] [ns_1@127.0.0.1:<0.23848.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:35] [ns_1@127.0.0.1:<0.23784.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:36] [ns_1@127.0.0.1:<0.23842.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:36] [ns_1@127.0.0.1:<0.23764.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:37] [ns_1@127.0.0.1:<0.23863.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:37] [ns_1@127.0.0.1:<0.23799.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:38] [ns_1@127.0.0.1:<0.23857.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:38] [ns_1@127.0.0.1:<0.23776.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:39] [ns_1@127.0.0.1:<0.23874.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:39] [ns_1@127.0.0.1:<0.23890.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:39] 
[ns_1@127.0.0.1:<0.23810.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23860.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23896.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:40] [ns_1@127.0.0.1:<0.23870.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:40] [ns_1@127.0.0.1:<0.23890.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:40] [ns_1@127.0.0.1:<0.23793.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:41] [ns_1@127.0.0.1:<0.23893.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:41] [ns_1@127.0.0.1:<0.23890.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:41] [ns_1@127.0.0.1:<0.23828.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:42] [ns_1@127.0.0.1:<0.23880.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:42] [ns_1@127.0.0.1:<0.23890.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:42] [ns_1@127.0.0.1:<0.23804.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:43] [ns_1@127.0.0.1:<0.23907.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:15:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23890.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:15:43] 
[ns_1@127.0.0.1:<0.23840.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:44] [ns_1@127.0.0.1:<0.23900.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:44] [ns_1@127.0.0.1:<0.23814.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23896.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23930.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:45] [ns_1@127.0.0.1:<0.23920.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:45] [ns_1@127.0.0.1:<0.23853.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:46] [ns_1@127.0.0.1:<0.23913.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:46] [ns_1@127.0.0.1:<0.23833.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:47] [ns_1@127.0.0.1:<0.23935.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:47] [ns_1@127.0.0.1:<0.23868.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:48] [ns_1@127.0.0.1:<0.23927.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:48] [ns_1@127.0.0.1:<0.23846.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:49] [ns_1@127.0.0.1:<0.23946.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:49] [ns_1@127.0.0.1:<0.23958.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23930.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23962.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 1:15:49] [ns_1@127.0.0.1:<0.23878.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:50] [ns_1@127.0.0.1:<0.23939.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:50] [ns_1@127.0.0.1:<0.23958.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:50] [ns_1@127.0.0.1:<0.23861.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:51] [ns_1@127.0.0.1:<0.23963.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:51] [ns_1@127.0.0.1:<0.23958.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:51] [ns_1@127.0.0.1:<0.23897.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:52] [ns_1@127.0.0.1:<0.23950.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:52] [ns_1@127.0.0.1:<0.23958.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:52] [ns_1@127.0.0.1:<0.23872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:53] [ns_1@127.0.0.1:<0.23976.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:15:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23958.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:15:53] [ns_1@127.0.0.1:<0.23910.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:54] [ns_1@127.0.0.1:<0.23969.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:15:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23962.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23997.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:15:55] [ns_1@127.0.0.1:<0.23883.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:55] [ns_1@127.0.0.1:<0.23989.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:56] [ns_1@127.0.0.1:<0.23922.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:56] [ns_1@127.0.0.1:<0.23981.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:57] [ns_1@127.0.0.1:<0.23902.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:57] [ns_1@127.0.0.1:<0.24004.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:58] [ns_1@127.0.0.1:<0.23937.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:58] [ns_1@127.0.0.1:<0.23998.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:59] [ns_1@127.0.0.1:<0.23915.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:15:59] [ns_1@127.0.0.1:<0.24015.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:15:59] [ns_1@127.0.0.1:<0.24028.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:15:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749759,667260}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37556456}, {processes,10454408}, {processes_used,8828928}, {system,27102048}, {atom,1306681}, {atom_used,1284164}, {binary,473664}, {code,12859877}, {ets,1896752}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,781}, {memory_data,{4040077312,4010012672,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 29360 kB\nBuffers: 55408 kB\nCached: 3532032 kB\nSwapCached: 0 kB\nActive: 303860 kB\nInactive: 3443828 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 29360 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 160252 kB\nMapped: 24856 kB\nSlab: 134420 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 
kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616800768}, {buffered_memory,56737792}, {free_memory,30064640}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{776403,0}}, {context_switches,{573123,0}}, {garbage_collection,{284197,319426602,0}}, {io,{{input,17697170},{output,18227642}}}, {reductions,{133245367,664985}}, {run_queue,0}, {runtime,{21680,150}}]}]}] [error_logger:error] [2012-03-26 1:15:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23997.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:15:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24033.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:00] [ns_1@127.0.0.1:<0.23948.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:00] [ns_1@127.0.0.1:<0.24009.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:00] [ns_1@127.0.0.1:<0.24028.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:01] [ns_1@127.0.0.1:<0.23931.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:01] [ns_1@127.0.0.1:<0.24034.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:01] [ns_1@127.0.0.1:<0.24028.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:02] [ns_1@127.0.0.1:<0.23965.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:02] [ns_1@127.0.0.1:<0.24019.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:02] [ns_1@127.0.0.1:<0.24028.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:03] [ns_1@127.0.0.1:<0.23941.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:03] [ns_1@127.0.0.1:<0.24046.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:16:03] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24028.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:16:04] [ns_1@127.0.0.1:<0.23978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:04] [ns_1@127.0.0.1:<0.24039.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24033.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24066.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:05] [ns_1@127.0.0.1:<0.23952.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:05] [ns_1@127.0.0.1:<0.24059.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:06] [ns_1@127.0.0.1:<0.23991.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:06] [ns_1@127.0.0.1:<0.24052.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:07] [ns_1@127.0.0.1:<0.23971.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:07] [ns_1@127.0.0.1:<0.24074.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:08] [ns_1@127.0.0.1:<0.24006.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:08] [ns_1@127.0.0.1:<0.24067.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:09] [ns_1@127.0.0.1:<0.23983.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:09] [ns_1@127.0.0.1:<0.24084.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:09] [ns_1@127.0.0.1:<0.24100.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] 
[2012-03-26 1:16:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24066.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24104.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:10] [ns_1@127.0.0.1:<0.24017.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:10] [ns_1@127.0.0.1:<0.24078.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:10] [ns_1@127.0.0.1:<0.24100.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:11] [ns_1@127.0.0.1:<0.24000.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:11] [ns_1@127.0.0.1:<0.24105.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:11] [ns_1@127.0.0.1:<0.24100.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:12] [ns_1@127.0.0.1:<0.24036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:12] [ns_1@127.0.0.1:<0.24089.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:12] [ns_1@127.0.0.1:<0.24100.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:13] [ns_1@127.0.0.1:<0.24011.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:13] [ns_1@127.0.0.1:<0.24118.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:16:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24100.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:16:14] 
[ns_1@127.0.0.1:<0.24048.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:14] [ns_1@127.0.0.1:<0.24110.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:14] [ns_1@127.0.0.1:<0.24123.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:14] [ns_1@127.0.0.1:<0.24021.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24104.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24142.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:15] [ns_1@127.0.0.1:<0.24041.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:15] [ns_1@127.0.0.1:<0.24130.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:16] [ns_1@127.0.0.1:<0.24061.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:16] [ns_1@127.0.0.1:<0.24143.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:17] [ns_1@127.0.0.1:<0.24054.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:17] [ns_1@127.0.0.1:<0.24149.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:18] [ns_1@127.0.0.1:<0.24076.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:18] [ns_1@127.0.0.1:<0.24153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:19] [ns_1@127.0.0.1:<0.24069.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:19] [ns_1@127.0.0.1:<0.24170.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:19] [ns_1@127.0.0.1:<0.24160.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24142.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:19] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24176.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:20] [ns_1@127.0.0.1:<0.24086.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:20] [ns_1@127.0.0.1:<0.24170.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:20] [ns_1@127.0.0.1:<0.24164.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:21] [ns_1@127.0.0.1:<0.24080.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:21] [ns_1@127.0.0.1:<0.24170.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:21] [ns_1@127.0.0.1:<0.24177.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:22] [ns_1@127.0.0.1:<0.24107.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:22] [ns_1@127.0.0.1:<0.24170.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:22] [ns_1@127.0.0.1:<0.24183.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:23] [ns_1@127.0.0.1:<0.24094.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:16:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24170.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:16:23] [ns_1@127.0.0.1:<0.24190.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:24] [ns_1@127.0.0.1:<0.24120.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24176.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24209.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:24] [ns_1@127.0.0.1:<0.24195.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:25] [ns_1@127.0.0.1:<0.24112.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:25] [ns_1@127.0.0.1:<0.24203.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:26] [ns_1@127.0.0.1:<0.24134.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:26] [ns_1@127.0.0.1:<0.24212.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:27] [ns_1@127.0.0.1:<0.24127.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:28] [ns_1@127.0.0.1:<0.24218.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:28] [ns_1@127.0.0.1:<0.24136.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:29] [ns_1@127.0.0.1:<0.24223.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:29] [ns_1@127.0.0.1:<0.24147.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:29] [ns_1@127.0.0.1:<0.24239.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24209.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24244.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:30] [ns_1@127.0.0.1:<0.24229.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:30] [ns_1@127.0.0.1:<0.24138.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:30] [ns_1@127.0.0.1:<0.24239.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:31] [ns_1@127.0.0.1:<0.24233.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:31] 
[ns_1@127.0.0.1:<0.24158.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:31] [ns_1@127.0.0.1:<0.24239.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:32] [ns_1@127.0.0.1:<0.24247.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:32] [ns_1@127.0.0.1:<0.24151.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:32] [ns_1@127.0.0.1:<0.24239.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:33] [ns_1@127.0.0.1:<0.24252.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:33] [ns_1@127.0.0.1:<0.24173.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:16:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24239.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:16:34] [ns_1@127.0.0.1:<0.24259.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:34] [ns_1@127.0.0.1:<0.24162.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24244.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24277.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:35] [ns_1@127.0.0.1:<0.24265.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:35] [ns_1@127.0.0.1:<0.24188.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:36] 
[ns_1@127.0.0.1:<0.24272.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:36] [ns_1@127.0.0.1:<0.24181.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:37] [ns_1@127.0.0.1:<0.24280.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:37] [ns_1@127.0.0.1:<0.24201.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:38] [ns_1@127.0.0.1:<0.24287.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:38] [ns_1@127.0.0.1:<0.24193.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:39] [ns_1@127.0.0.1:<0.24291.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:39] [ns_1@127.0.0.1:<0.24216.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:39] [ns_1@127.0.0.1:<0.24309.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24277.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24313.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:40] [ns_1@127.0.0.1:<0.24297.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:40] [ns_1@127.0.0.1:<0.24210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:40] [ns_1@127.0.0.1:<0.24309.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:41] [ns_1@127.0.0.1:<0.24302.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:41] [ns_1@127.0.0.1:<0.24227.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:41] [ns_1@127.0.0.1:<0.24309.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:42] [ns_1@127.0.0.1:<0.24316.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:42] [ns_1@127.0.0.1:<0.24221.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:42] [ns_1@127.0.0.1:<0.24309.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:43] [ns_1@127.0.0.1:<0.24321.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:43] [ns_1@127.0.0.1:<0.24245.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:16:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24309.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:16:44] [ns_1@127.0.0.1:<0.24329.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:44] [ns_1@127.0.0.1:<0.24231.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24313.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24347.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:45] [ns_1@127.0.0.1:<0.24334.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:45] [ns_1@127.0.0.1:<0.24257.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:46] [ns_1@127.0.0.1:<0.24343.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:46] [ns_1@127.0.0.1:<0.24250.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:47] [ns_1@127.0.0.1:<0.24350.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:47] [ns_1@127.0.0.1:<0.24270.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:48] [ns_1@127.0.0.1:<0.24356.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:48] [ns_1@127.0.0.1:<0.24263.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:16:49] [ns_1@127.0.0.1:<0.24362.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:49] [ns_1@127.0.0.1:<0.24375.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:49] [ns_1@127.0.0.1:<0.24285.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24347.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24381.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:50] [ns_1@127.0.0.1:<0.24367.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:50] [ns_1@127.0.0.1:<0.24375.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:50] [ns_1@127.0.0.1:<0.24278.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:51] [ns_1@127.0.0.1:<0.24378.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:51] [ns_1@127.0.0.1:<0.24375.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:51] [ns_1@127.0.0.1:<0.24295.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:52] [ns_1@127.0.0.1:<0.24386.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:52] [ns_1@127.0.0.1:<0.24375.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:52] [ns_1@127.0.0.1:<0.24289.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:53] [ns_1@127.0.0.1:<0.24393.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:16:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24375.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:16:53] [ns_1@127.0.0.1:<0.24314.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:54] [ns_1@127.0.0.1:<0.24398.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:54] [ns_1@127.0.0.1:<0.24300.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:16:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24381.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24416.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:55] [ns_1@127.0.0.1:<0.24406.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:55] [ns_1@127.0.0.1:<0.24327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:56] [ns_1@127.0.0.1:<0.24413.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:56] [ns_1@127.0.0.1:<0.24319.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:57] [ns_1@127.0.0.1:<0.24421.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:57] [ns_1@127.0.0.1:<0.24339.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:58] [ns_1@127.0.0.1:<0.24426.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:58] [ns_1@127.0.0.1:<0.24332.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:16:59] [ns_1@127.0.0.1:<0.24432.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:16:59] [ns_1@127.0.0.1:<0.24445.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:16:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749819,695318}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37643496}, {processes,10546856}, {processes_used,8921376}, {system,27096640}, {atom,1306681}, {atom_used,1284164}, {binary,480712}, 
{code,12859877}, {ets,1868472}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,841}, {memory_data,{4040077312,4010520576,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 28864 kB\nBuffers: 55504 kB\nCached: 3532192 kB\nSwapCached: 0 kB\nActive: 304000 kB\nInactive: 3443940 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 28864 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 160264 kB\nMapped: 24856 kB\nSlab: 134432 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616964608}, {buffered_memory,56836096}, {free_memory,29556736}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{836432,0}}, {context_switches,{586111,0}}, {garbage_collection,{291221,330531022,0}}, {io,{{input,17727759},{output,18665465}}}, {reductions,{135813683,645182}}, {run_queue,0}, {runtime,{22270,130}}]}]}] [error_logger:error] [2012-03-26 1:16:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24416.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:16:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24450.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:16:59] [ns_1@127.0.0.1:<0.24354.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:00] [ns_1@127.0.0.1:<0.24436.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:00] [ns_1@127.0.0.1:<0.24445.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:00] [ns_1@127.0.0.1:<0.24348.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:01] [ns_1@127.0.0.1:<0.24451.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:01] 
[ns_1@127.0.0.1:<0.24445.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:01] [ns_1@127.0.0.1:<0.24365.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:02] [ns_1@127.0.0.1:<0.24456.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:02] [ns_1@127.0.0.1:<0.24445.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:03] [ns_1@127.0.0.1:<0.24358.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:03] [ns_1@127.0.0.1:<0.24463.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:17:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24445.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:17:04] [ns_1@127.0.0.1:<0.24382.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:04] [ns_1@127.0.0.1:<0.24469.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24450.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24483.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:05] [ns_1@127.0.0.1:<0.24369.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:05] [ns_1@127.0.0.1:<0.24476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:06] [ns_1@127.0.0.1:<0.24395.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:06] 
[ns_1@127.0.0.1:<0.24484.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:07] [ns_1@127.0.0.1:<0.24388.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:07] [ns_1@127.0.0.1:<0.24491.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:08] [ns_1@127.0.0.1:<0.24408.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:08] [ns_1@127.0.0.1:<0.24495.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:09] [ns_1@127.0.0.1:<0.24400.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:09] [ns_1@127.0.0.1:<0.24501.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:09] [ns_1@127.0.0.1:<0.24517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24483.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24521.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:10] [ns_1@127.0.0.1:<0.24423.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:10] [ns_1@127.0.0.1:<0.24506.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:10] [ns_1@127.0.0.1:<0.24517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:11] [ns_1@127.0.0.1:<0.24417.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:11] [ns_1@127.0.0.1:<0.24522.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:11] [ns_1@127.0.0.1:<0.24517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:12] [ns_1@127.0.0.1:<0.24434.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:12] [ns_1@127.0.0.1:<0.24527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:12] [ns_1@127.0.0.1:<0.24517.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:13] [ns_1@127.0.0.1:<0.24428.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:13] [ns_1@127.0.0.1:<0.24535.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:17:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24517.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:17:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24521.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24551.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:16] [ns_1@127.0.0.1:<0.24453.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:16] [ns_1@127.0.0.1:<0.24465.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:16] [ns_1@127.0.0.1:<0.24478.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:16] [ns_1@127.0.0.1:<0.24540.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:17] [ns_1@127.0.0.1:<0.24438.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:17] [ns_1@127.0.0.1:<0.24554.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:18] [ns_1@127.0.0.1:<0.24493.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:18] [ns_1@127.0.0.1:<0.24562.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:19] [ns_1@127.0.0.1:<0.24458.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:19] [ns_1@127.0.0.1:<0.24579.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:19] [ns_1@127.0.0.1:<0.24556.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[error_logger:error] [2012-03-26 1:17:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24551.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24585.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:20] [ns_1@127.0.0.1:<0.24503.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:20] [ns_1@127.0.0.1:<0.24579.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:20] [ns_1@127.0.0.1:<0.24573.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:21] [ns_1@127.0.0.1:<0.24471.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:21] [ns_1@127.0.0.1:<0.24579.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:21] [ns_1@127.0.0.1:<0.24558.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:22] [ns_1@127.0.0.1:<0.24524.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:22] [ns_1@127.0.0.1:<0.24579.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:22] [ns_1@127.0.0.1:<0.24592.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:23] [ns_1@127.0.0.1:<0.24486.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:17:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24579.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:17:23] [ns_1@127.0.0.1:<0.24569.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:17:24] [ns_1@127.0.0.1:<0.24537.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24585.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24618.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:24] [ns_1@127.0.0.1:<0.24604.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:25] [ns_1@127.0.0.1:<0.24497.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:26] [ns_1@127.0.0.1:<0.24586.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:26] [ns_1@127.0.0.1:<0.24560.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:27] [ns_1@127.0.0.1:<0.24621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:27] [ns_1@127.0.0.1:<0.24508.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:28] [ns_1@127.0.0.1:<0.24599.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:28] [ns_1@127.0.0.1:<0.24571.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:29] [ns_1@127.0.0.1:<0.24632.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:29] [ns_1@127.0.0.1:<0.24529.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:29] [ns_1@127.0.0.1:<0.24648.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24618.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24653.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:30] [ns_1@127.0.0.1:<0.24612.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:17:30] [ns_1@127.0.0.1:<0.24590.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:30] [ns_1@127.0.0.1:<0.24648.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:31] [ns_1@127.0.0.1:<0.24642.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:31] [ns_1@127.0.0.1:<0.24542.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:31] [ns_1@127.0.0.1:<0.24648.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:32] [ns_1@127.0.0.1:<0.24627.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:32] [ns_1@127.0.0.1:<0.24602.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:32] [ns_1@127.0.0.1:<0.24648.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:33] [ns_1@127.0.0.1:<0.24661.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:33] [ns_1@127.0.0.1:<0.24567.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:17:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24648.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:17:34] [ns_1@127.0.0.1:<0.24638.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:34] [ns_1@127.0.0.1:<0.24619.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24653.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24686.0>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:35] [ns_1@127.0.0.1:<0.24674.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:35] [ns_1@127.0.0.1:<0.24582.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:36] [ns_1@127.0.0.1:<0.24656.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:36] [ns_1@127.0.0.1:<0.24630.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:37] [ns_1@127.0.0.1:<0.24689.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:37] [ns_1@127.0.0.1:<0.24597.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:38] [ns_1@127.0.0.1:<0.24668.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:38] [ns_1@127.0.0.1:<0.24640.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:39] [ns_1@127.0.0.1:<0.24700.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:39] [ns_1@127.0.0.1:<0.24610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:39] [ns_1@127.0.0.1:<0.24718.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24686.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24722.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:40] [ns_1@127.0.0.1:<0.24681.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:40] [ns_1@127.0.0.1:<0.24659.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:40] [ns_1@127.0.0.1:<0.24718.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:41] [ns_1@127.0.0.1:<0.24711.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:41] [ns_1@127.0.0.1:<0.24625.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:41] [ns_1@127.0.0.1:<0.24718.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:42] [ns_1@127.0.0.1:<0.24696.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:42] [ns_1@127.0.0.1:<0.24672.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:42] [ns_1@127.0.0.1:<0.24718.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:43] [ns_1@127.0.0.1:<0.24730.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:43] [ns_1@127.0.0.1:<0.24636.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:17:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24718.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:17:44] [ns_1@127.0.0.1:<0.24706.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:44] [ns_1@127.0.0.1:<0.24687.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24722.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24756.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:45] [ns_1@127.0.0.1:<0.24743.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:45] [ns_1@127.0.0.1:<0.24654.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:46] [ns_1@127.0.0.1:<0.24725.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:46] [ns_1@127.0.0.1:<0.24698.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:47] [ns_1@127.0.0.1:<0.24761.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:47] [ns_1@127.0.0.1:<0.24666.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:48] [ns_1@127.0.0.1:<0.24738.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:48] [ns_1@127.0.0.1:<0.24709.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:49] [ns_1@127.0.0.1:<0.24772.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:49] [ns_1@127.0.0.1:<0.24784.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:49] [ns_1@127.0.0.1:<0.24679.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24756.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24790.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:50] [ns_1@127.0.0.1:<0.24752.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:50] [ns_1@127.0.0.1:<0.24784.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:50] [ns_1@127.0.0.1:<0.24728.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:51] [ns_1@127.0.0.1:<0.24787.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:51] [ns_1@127.0.0.1:<0.24784.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:51] [ns_1@127.0.0.1:<0.24694.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:52] [ns_1@127.0.0.1:<0.24765.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:52] [ns_1@127.0.0.1:<0.24784.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:52] [ns_1@127.0.0.1:<0.24741.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:53] [ns_1@127.0.0.1:<0.24802.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:17:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24784.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:17:53] [ns_1@127.0.0.1:<0.24704.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:54] [ns_1@127.0.0.1:<0.24776.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:54] [ns_1@127.0.0.1:<0.24757.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:17:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24790.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24825.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:55] [ns_1@127.0.0.1:<0.24815.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:55] [ns_1@127.0.0.1:<0.24723.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:56] [ns_1@127.0.0.1:<0.24795.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:56] [ns_1@127.0.0.1:<0.24767.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:57] [ns_1@127.0.0.1:<0.24830.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:57] [ns_1@127.0.0.1:<0.24736.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:58] [ns_1@127.0.0.1:<0.24807.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:58] [ns_1@127.0.0.1:<0.24778.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:17:59] [ns_1@127.0.0.1:<0.24841.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:17:59] [ns_1@127.0.0.1:<0.24869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:17:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current 
node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749879,722231}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37502408}, {processes,10368768}, {processes_used,8743288}, {system,27133640}, {atom,1306681}, {atom_used,1284164}, {binary,470888}, {code,12859877}, {ets,1899752}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,901}, {memory_data,{4040077312,4010647552,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 28740 kB\nBuffers: 55596 kB\nCached: 3532348 kB\nSwapCached: 0 kB\nActive: 304096 kB\nInactive: 3444132 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 28740 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 160280 kB\nMapped: 24856 kB\nSlab: 134436 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617124352}, {buffered_memory,56930304}, {free_memory,29429760}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{896459,0}}, {context_switches,{598979,0}}, {garbage_collection,{298582,341545719,0}}, {io,{{input,17758348},{output,19103661}}}, {reductions,{138407950,662133}}, {run_queue,0}, {runtime,{22820,130}}]}]}] [error_logger:error] [2012-03-26 1:17:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24825.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:17:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24874.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:17:59] [ns_1@127.0.0.1:<0.24748.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:00] [ns_1@127.0.0.1:<0.24822.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:00] 
[ns_1@127.0.0.1:<0.24869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:01] [ns_1@127.0.0.1:<0.24797.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:01] [ns_1@127.0.0.1:<0.24875.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:01] [ns_1@127.0.0.1:<0.24869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:02] [ns_1@127.0.0.1:<0.24763.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:02] [ns_1@127.0.0.1:<0.24835.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:02] [ns_1@127.0.0.1:<0.24869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:03] [ns_1@127.0.0.1:<0.24809.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:03] [ns_1@127.0.0.1:<0.24887.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:18:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24869.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:18:04] [ns_1@127.0.0.1:<0.24774.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:04] [ns_1@127.0.0.1:<0.24845.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24874.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24907.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:05] 
[ns_1@127.0.0.1:<0.24826.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:05] [ns_1@127.0.0.1:<0.24900.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:06] [ns_1@127.0.0.1:<0.24791.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:06] [ns_1@127.0.0.1:<0.24880.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:07] [ns_1@127.0.0.1:<0.24837.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:07] [ns_1@127.0.0.1:<0.24915.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:08] [ns_1@127.0.0.1:<0.24804.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:08] [ns_1@127.0.0.1:<0.24893.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:09] [ns_1@127.0.0.1:<0.24847.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:09] [ns_1@127.0.0.1:<0.24925.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:09] [ns_1@127.0.0.1:<0.24941.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24907.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24945.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:10] [ns_1@127.0.0.1:<0.24817.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:10] [ns_1@127.0.0.1:<0.24908.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:10] [ns_1@127.0.0.1:<0.24941.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:11] [ns_1@127.0.0.1:<0.24882.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:11] [ns_1@127.0.0.1:<0.24946.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:11] [ns_1@127.0.0.1:<0.24941.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:12] [ns_1@127.0.0.1:<0.24832.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:12] [ns_1@127.0.0.1:<0.24919.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:12] [ns_1@127.0.0.1:<0.24941.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:13] [ns_1@127.0.0.1:<0.24895.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:13] [ns_1@127.0.0.1:<0.24959.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:18:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24941.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:18:14] [ns_1@127.0.0.1:<0.24843.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:14] [ns_1@127.0.0.1:<0.24930.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24945.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24979.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:15] [ns_1@127.0.0.1:<0.24910.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:15] [ns_1@127.0.0.1:<0.24971.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:16] [ns_1@127.0.0.1:<0.24877.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:16] [ns_1@127.0.0.1:<0.24951.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:16] [ns_1@127.0.0.1:<0.24964.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:16] [ns_1@127.0.0.1:<0.24980.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
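The crash reports in this stretch of the log all fail the same way: ns_janitor:cleanup/2 issues gen_server:call({'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000) while nothing is registered under 'ns_memcached-default' on that node, so the calling process is terminated with {noproc,{gen_server,call,[...]}}. The sketch below is illustrative only, not ns_server code; the module name noproc_sketch is made up, and the registered name and request atom are copied from the log purely to show the standard OTP behaviour behind this exit reason.

%% Minimal sketch (not ns_server code): gen_server:call/3 to a registered name
%% with no live process behind it exits the caller with
%% {noproc,{gen_server,call,Args}}, the same reason shown in the janitor
%% crash reports above.
-module(noproc_sketch).
-export([run/0]).

run() ->
    %% Nothing is registered locally under this name in this sketch.
    Target = {'ns_memcached-default', node()},
    try
        gen_server:call(Target, list_vbuckets_prevstate, 30000)
    catch
        exit:{noproc, Details} ->
            %% ns_janitor does not catch this, so its process exits with this
            %% reason and the orchestrator logs "Janitor run exited".
            {would_have_exited_with, {noproc, Details}}
    end.

Judging from the timestamps and the fresh pid in each report (<0.24869.0>, <0.24941.0>, <0.25011.0>, ...), the orchestrator appears to start a new janitor run roughly every ten seconds, and each run dies at this same call while the per-bucket memcached process is still unavailable.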
[stats:error] [2012-03-26 1:18:17] [ns_1@127.0.0.1:<0.24921.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:17] [ns_1@127.0.0.1:<0.24986.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:18] [ns_1@127.0.0.1:<0.24889.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:18] [ns_1@127.0.0.1:<0.24994.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:19] [ns_1@127.0.0.1:<0.24932.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:19] [ns_1@127.0.0.1:<0.25011.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:19] [ns_1@127.0.0.1:<0.25001.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24979.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25017.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:20] [ns_1@127.0.0.1:<0.24902.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:20] [ns_1@127.0.0.1:<0.25011.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:20] [ns_1@127.0.0.1:<0.25005.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:21] [ns_1@127.0.0.1:<0.24953.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:21] [ns_1@127.0.0.1:<0.25011.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:21] [ns_1@127.0.0.1:<0.25018.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:22] [ns_1@127.0.0.1:<0.24917.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:22] [ns_1@127.0.0.1:<0.25011.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:22] [ns_1@127.0.0.1:<0.25024.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:23] [ns_1@127.0.0.1:<0.24966.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 
30000]}} [error_logger:error] [2012-03-26 1:18:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25011.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:18:23] [ns_1@127.0.0.1:<0.25031.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:24] [ns_1@127.0.0.1:<0.24927.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:24] [ns_1@127.0.0.1:<0.25036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25017.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25052.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:25] [ns_1@127.0.0.1:<0.24982.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:25] [ns_1@127.0.0.1:<0.25044.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:26] [ns_1@127.0.0.1:<0.24948.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:26] [ns_1@127.0.0.1:<0.25053.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:27] [ns_1@127.0.0.1:<0.24996.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:27] [ns_1@127.0.0.1:<0.25059.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:28] [ns_1@127.0.0.1:<0.24961.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:28] [ns_1@127.0.0.1:<0.25064.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:29] [ns_1@127.0.0.1:<0.25014.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:29] [ns_1@127.0.0.1:<0.25080.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:29] [ns_1@127.0.0.1:<0.25070.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25052.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25087.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:30] [ns_1@127.0.0.1:<0.24973.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:30] [ns_1@127.0.0.1:<0.25080.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:30] [ns_1@127.0.0.1:<0.25074.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:31] [ns_1@127.0.0.1:<0.25029.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:31] [ns_1@127.0.0.1:<0.25080.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:31] [ns_1@127.0.0.1:<0.25088.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:32] [ns_1@127.0.0.1:<0.24988.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:32] [ns_1@127.0.0.1:<0.25080.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:32] [ns_1@127.0.0.1:<0.25093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:33] [ns_1@127.0.0.1:<0.25042.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:18:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25080.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: 
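Each SUPERVISOR REPORT from menelaus_sup above (Context: child_terminated, Reason {noproc,...} on the 'ns_memcached-default' topkeys call) is immediately followed by a PROGRESS REPORT starting a new hot_keys_keeper pid. That is the expected handling of a child declared permanent: the supervisor restarts it after every termination, so the terminate/restart pair recurs (about every five seconds here) for as long as the topkeys call keeps failing. The child spec below is only an illustrative reconstruction in the classic tuple form, shaped after the fields printed in the reports; the module name hot_keys_child_sketch is made up and this is not the menelaus_sup source.

%% Illustrative reconstruction of the child-spec fields printed in the
%% supervisor/progress reports above (old-style tuple form); not actual
%% menelaus_sup code.
-module(hot_keys_child_sketch).
-export([child_spec/0]).

child_spec() ->
    {hot_keys_keeper,                    %% {name,hot_keys_keeper}
     {hot_keys_keeper, start_link, []},  %% {mfargs,{hot_keys_keeper,start_link,[]}}
     permanent,                          %% {restart_type,permanent}: restarted after every exit
     5000,                               %% {shutdown,5000} ms
     worker,                             %% {child_type,worker}
     [hot_keys_keeper]}.

Unless the supervisor's restart intensity is exceeded, this loop simply continues until the 'ns_memcached-default' process comes back, which matches the steady cadence of the reports in this section.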
[stats:error] [2012-03-26 1:18:33] [ns_1@127.0.0.1:<0.25100.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:34] [ns_1@127.0.0.1:<0.24990.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25087.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25118.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:35] [ns_1@127.0.0.1:<0.25106.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:35] [ns_1@127.0.0.1:<0.25057.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:36] [ns_1@127.0.0.1:<0.25113.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:36] [ns_1@127.0.0.1:<0.24992.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:37] [ns_1@127.0.0.1:<0.25121.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:37] [ns_1@127.0.0.1:<0.25068.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:38] [ns_1@127.0.0.1:<0.25128.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:38] [ns_1@127.0.0.1:<0.25003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:39] [ns_1@127.0.0.1:<0.25132.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:39] [ns_1@127.0.0.1:<0.25084.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:39] [ns_1@127.0.0.1:<0.25150.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25118.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25154.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 1:18:40] [ns_1@127.0.0.1:<0.25138.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:40] [ns_1@127.0.0.1:<0.25021.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:40] [ns_1@127.0.0.1:<0.25150.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:41] [ns_1@127.0.0.1:<0.25143.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:41] [ns_1@127.0.0.1:<0.25098.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:41] [ns_1@127.0.0.1:<0.25150.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:42] [ns_1@127.0.0.1:<0.25157.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:42] [ns_1@127.0.0.1:<0.25034.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:42] [ns_1@127.0.0.1:<0.25150.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:43] [ns_1@127.0.0.1:<0.25162.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:43] [ns_1@127.0.0.1:<0.25111.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:18:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25150.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:18:44] [ns_1@127.0.0.1:<0.25170.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:44] [ns_1@127.0.0.1:<0.25049.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25154.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:44] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25188.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:45] [ns_1@127.0.0.1:<0.25175.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:45] [ns_1@127.0.0.1:<0.25126.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:46] [ns_1@127.0.0.1:<0.25182.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:46] [ns_1@127.0.0.1:<0.25062.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:47] [ns_1@127.0.0.1:<0.25191.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:47] [ns_1@127.0.0.1:<0.25136.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:48] [ns_1@127.0.0.1:<0.25197.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:48] [ns_1@127.0.0.1:<0.25072.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:49] [ns_1@127.0.0.1:<0.25201.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:49] [ns_1@127.0.0.1:<0.25155.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:49] [ns_1@127.0.0.1:<0.25218.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25188.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25222.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:50] [ns_1@127.0.0.1:<0.25208.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:50] [ns_1@127.0.0.1:<0.25091.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:50] [ns_1@127.0.0.1:<0.25218.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:51] [ns_1@127.0.0.1:<0.25212.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:51] [ns_1@127.0.0.1:<0.25168.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:51] 
[ns_1@127.0.0.1:<0.25218.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:52] [ns_1@127.0.0.1:<0.25225.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:52] [ns_1@127.0.0.1:<0.25104.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:52] [ns_1@127.0.0.1:<0.25218.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:53] [ns_1@127.0.0.1:<0.25231.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:53] [ns_1@127.0.0.1:<0.25180.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:18:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25218.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:18:54] [ns_1@127.0.0.1:<0.25238.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:54] [ns_1@127.0.0.1:<0.25119.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25222.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25257.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:18:55] [ns_1@127.0.0.1:<0.25243.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:55] [ns_1@127.0.0.1:<0.25195.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:56] [ns_1@127.0.0.1:<0.25254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:56] 
[ns_1@127.0.0.1:<0.25130.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:57] [ns_1@127.0.0.1:<0.25262.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:57] [ns_1@127.0.0.1:<0.25206.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:58] [ns_1@127.0.0.1:<0.25266.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:58] [ns_1@127.0.0.1:<0.25141.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:18:59] [ns_1@127.0.0.1:<0.25273.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:18:59] [ns_1@127.0.0.1:<0.25286.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:18:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749939,751322}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37581248}, {processes,10461184}, {processes_used,8835704}, {system,27120064}, {atom,1306681}, {atom_used,1284164}, {binary,469328}, {code,12859877}, {ets,1871480}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,961}, {memory_data,{4040077312,4010774528,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 28616 kB\nBuffers: 55752 kB\nCached: 3532492 kB\nSwapCached: 0 kB\nActive: 304096 kB\nInactive: 3444428 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 28616 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 0 kB\nWriteback: 0 kB\nAnonPages: 160296 kB\nMapped: 24856 kB\nSlab: 134420 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617271808}, {buffered_memory,57090048}, {free_memory,29302784}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{956487,0}}, {context_switches,{612329,0}}, {garbage_collection,{306056,352875902,0}}, {io,{{input,18035183},{output,19766042}}}, {reductions,{141077245,646480}}, {run_queue,0}, {runtime,{23380,130}}]}]}] [stats:error] [2012-03-26 1:18:59] [ns_1@127.0.0.1:<0.25223.0>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:18:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25257.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:18:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25293.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:00] [ns_1@127.0.0.1:<0.25277.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:00] [ns_1@127.0.0.1:<0.25286.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:00] [ns_1@127.0.0.1:<0.25160.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:01] [ns_1@127.0.0.1:<0.25290.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:01] [ns_1@127.0.0.1:<0.25286.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:01] [ns_1@127.0.0.1:<0.25236.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:02] [ns_1@127.0.0.1:<0.25297.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:02] [ns_1@127.0.0.1:<0.25286.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:02] [ns_1@127.0.0.1:<0.25173.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:03] [ns_1@127.0.0.1:<0.25304.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:19:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25286.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:19:03] [ns_1@127.0.0.1:<0.25249.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:04] [ns_1@127.0.0.1:<0.25310.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:04] [ns_1@127.0.0.1:<0.25189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25293.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25326.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:05] [ns_1@127.0.0.1:<0.25317.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:05] [ns_1@127.0.0.1:<0.25264.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:06] [ns_1@127.0.0.1:<0.25323.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:06] [ns_1@127.0.0.1:<0.25199.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:07] [ns_1@127.0.0.1:<0.25332.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:07] [ns_1@127.0.0.1:<0.25275.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:08] [ns_1@127.0.0.1:<0.25336.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:08] [ns_1@127.0.0.1:<0.25210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:09] [ns_1@127.0.0.1:<0.25342.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:09] [ns_1@127.0.0.1:<0.25358.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25326.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25362.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:10] [ns_1@127.0.0.1:<0.25294.0>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:10] [ns_1@127.0.0.1:<0.25347.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:10] [ns_1@127.0.0.1:<0.25358.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:11] [ns_1@127.0.0.1:<0.25229.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:11] [ns_1@127.0.0.1:<0.25363.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:11] [ns_1@127.0.0.1:<0.25358.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:12] [ns_1@127.0.0.1:<0.25306.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:12] [ns_1@127.0.0.1:<0.25368.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:12] [ns_1@127.0.0.1:<0.25358.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:13] [ns_1@127.0.0.1:<0.25241.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:13] [ns_1@127.0.0.1:<0.25376.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:19:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25358.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:19:14] [ns_1@127.0.0.1:<0.25319.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:14] [ns_1@127.0.0.1:<0.25381.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25362.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.25396.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:15] [ns_1@127.0.0.1:<0.25258.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:15] [ns_1@127.0.0.1:<0.25388.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:16] [ns_1@127.0.0.1:<0.25334.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:16] [ns_1@127.0.0.1:<0.25397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:17] [ns_1@127.0.0.1:<0.25269.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:17] [ns_1@127.0.0.1:<0.25279.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:17] [ns_1@127.0.0.1:<0.25299.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:17] [ns_1@127.0.0.1:<0.25312.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:17] [ns_1@127.0.0.1:<0.25327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:17] [ns_1@127.0.0.1:<0.25403.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:18] [ns_1@127.0.0.1:<0.25344.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:18] [ns_1@127.0.0.1:<0.25407.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:19] [ns_1@127.0.0.1:<0.25338.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:19] [ns_1@127.0.0.1:<0.25422.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:19] [ns_1@127.0.0.1:<0.25434.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25396.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25438.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:20] [ns_1@127.0.0.1:<0.25365.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:20] [ns_1@127.0.0.1:<0.25409.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:20] 
[ns_1@127.0.0.1:<0.25434.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:21] [ns_1@127.0.0.1:<0.25349.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:21] [ns_1@127.0.0.1:<0.25439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:21] [ns_1@127.0.0.1:<0.25434.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:22] [ns_1@127.0.0.1:<0.25378.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:22] [ns_1@127.0.0.1:<0.25411.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:22] [ns_1@127.0.0.1:<0.25434.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:23] [ns_1@127.0.0.1:<0.25370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:23] [ns_1@127.0.0.1:<0.25452.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:19:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25434.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:19:24] [ns_1@127.0.0.1:<0.25390.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:24] [ns_1@127.0.0.1:<0.25413.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25438.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25473.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:25] 
[ns_1@127.0.0.1:<0.25383.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:25] [ns_1@127.0.0.1:<0.25465.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:26] [ns_1@127.0.0.1:<0.25405.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:26] [ns_1@127.0.0.1:<0.25415.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:27] [ns_1@127.0.0.1:<0.25399.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:27] [ns_1@127.0.0.1:<0.25480.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:28] [ns_1@127.0.0.1:<0.25424.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:28] [ns_1@127.0.0.1:<0.25426.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:29] [ns_1@127.0.0.1:<0.25417.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:29] [ns_1@127.0.0.1:<0.25501.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:29] [ns_1@127.0.0.1:<0.25491.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25473.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25508.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:30] [ns_1@127.0.0.1:<0.25441.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:30] [ns_1@127.0.0.1:<0.25501.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:30] [ns_1@127.0.0.1:<0.25445.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:31] [ns_1@127.0.0.1:<0.25428.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:31] [ns_1@127.0.0.1:<0.25501.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:31] [ns_1@127.0.0.1:<0.25509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:32] [ns_1@127.0.0.1:<0.25454.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:32] [ns_1@127.0.0.1:<0.25501.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:32] [ns_1@127.0.0.1:<0.25457.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:33] [ns_1@127.0.0.1:<0.25447.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:19:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25501.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:19:33] [ns_1@127.0.0.1:<0.25521.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:34] [ns_1@127.0.0.1:<0.25467.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:34] [ns_1@127.0.0.1:<0.25474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25508.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25541.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:35] [ns_1@127.0.0.1:<0.25459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:35] [ns_1@127.0.0.1:<0.25534.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:36] [ns_1@127.0.0.1:<0.25482.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:36] [ns_1@127.0.0.1:<0.25485.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:37] [ns_1@127.0.0.1:<0.25476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:37] [ns_1@127.0.0.1:<0.25549.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:19:38] [ns_1@127.0.0.1:<0.25493.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:38] [ns_1@127.0.0.1:<0.25495.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:39] [ns_1@127.0.0.1:<0.25489.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:39] [ns_1@127.0.0.1:<0.25571.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:39] [ns_1@127.0.0.1:<0.25559.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25541.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25577.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:40] [ns_1@127.0.0.1:<0.25512.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:40] [ns_1@127.0.0.1:<0.25571.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:40] [ns_1@127.0.0.1:<0.25514.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:41] [ns_1@127.0.0.1:<0.25504.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:41] [ns_1@127.0.0.1:<0.25571.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:41] [ns_1@127.0.0.1:<0.25578.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:42] [ns_1@127.0.0.1:<0.25525.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:42] [ns_1@127.0.0.1:<0.25571.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:43] [ns_1@127.0.0.1:<0.25527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:43] [ns_1@127.0.0.1:<0.25519.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:19:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25571.0> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:19:44] [ns_1@127.0.0.1:<0.25591.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:44] [ns_1@127.0.0.1:<0.25538.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25577.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25609.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:45] [ns_1@127.0.0.1:<0.25542.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:45] [ns_1@127.0.0.1:<0.25532.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:46] [ns_1@127.0.0.1:<0.25603.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:46] [ns_1@127.0.0.1:<0.25551.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:47] [ns_1@127.0.0.1:<0.25553.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:47] [ns_1@127.0.0.1:<0.25547.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:48] [ns_1@127.0.0.1:<0.25618.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:48] [ns_1@127.0.0.1:<0.25562.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:49] [ns_1@127.0.0.1:<0.25564.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:49] [ns_1@127.0.0.1:<0.25557.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:49] [ns_1@127.0.0.1:<0.25639.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25609.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25643.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:50] [ns_1@127.0.0.1:<0.25629.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:50] [ns_1@127.0.0.1:<0.25581.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:50] [ns_1@127.0.0.1:<0.25639.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:51] [ns_1@127.0.0.1:<0.25583.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:51] [ns_1@127.0.0.1:<0.25574.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:51] [ns_1@127.0.0.1:<0.25639.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:52] [ns_1@127.0.0.1:<0.25646.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:52] [ns_1@127.0.0.1:<0.25594.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:52] [ns_1@127.0.0.1:<0.25639.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:53] [ns_1@127.0.0.1:<0.25596.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:53] [ns_1@127.0.0.1:<0.25589.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:19:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25639.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:19:54] [ns_1@127.0.0.1:<0.25659.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:54] [ns_1@127.0.0.1:<0.25610.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:19:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25643.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25678.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:19:55] [ns_1@127.0.0.1:<0.25612.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:55] [ns_1@127.0.0.1:<0.25601.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:56] [ns_1@127.0.0.1:<0.25672.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:56] [ns_1@127.0.0.1:<0.25620.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:57] [ns_1@127.0.0.1:<0.25622.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:57] [ns_1@127.0.0.1:<0.25616.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:58] [ns_1@127.0.0.1:<0.25687.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:58] [ns_1@127.0.0.1:<0.25631.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:59] [ns_1@127.0.0.1:<0.25633.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:19:59] [ns_1@127.0.0.1:<0.25627.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:19:59] [ns_1@127.0.0.1:<0.25707.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:19:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,749999,780249}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37792336}, {processes,10610800}, {processes_used,8987144}, {system,27181536}, {atom,1306681}, {atom_used,1284164}, {binary,490352}, {code,12859877}, {ets,1900480}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, 
{stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1021}, {memory_data,{4040077312,4010901504,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 28492 kB\nBuffers: 55844 kB\nCached: 3532648 kB\nSwapCached: 0 kB\nActive: 304508 kB\nInactive: 3444632 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 28492 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 4 kB\nWriteback: 0 kB\nAnonPages: 160532 kB\nMapped: 24856 kB\nSlab: 134428 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617431552}, {buffered_memory,57184256}, {free_memory,29175808}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1016517,0}}, {context_switches,{625510,0}}, {garbage_collection,{313311,364331527,0}}, {io,{{input,18065790},{output,20216222}}}, {reductions,{143719020,661033}}, {run_queue,0}, {runtime,{23980,150}}]}]}] [error_logger:error] [2012-03-26 1:19:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25678.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:19:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25714.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:00] [ns_1@127.0.0.1:<0.25698.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:00] [ns_1@127.0.0.1:<0.25650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:00] [ns_1@127.0.0.1:<0.25707.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:01] [ns_1@127.0.0.1:<0.25652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:01] [ns_1@127.0.0.1:<0.25644.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:01] [ns_1@127.0.0.1:<0.25707.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:02] [ns_1@127.0.0.1:<0.25717.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:02] [ns_1@127.0.0.1:<0.25662.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:02] [ns_1@127.0.0.1:<0.25707.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:03] [ns_1@127.0.0.1:<0.25664.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:20:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25707.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:20:03] [ns_1@127.0.0.1:<0.25657.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:04] [ns_1@127.0.0.1:<0.25729.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:04] [ns_1@127.0.0.1:<0.25679.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25714.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:05] [ns_1@127.0.0.1:<0.25681.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:05] [ns_1@127.0.0.1:<0.25670.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:06] [ns_1@127.0.0.1:<0.25744.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:06] [ns_1@127.0.0.1:<0.25690.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:07] [ns_1@127.0.0.1:<0.25692.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:07] [ns_1@127.0.0.1:<0.25685.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:08] [ns_1@127.0.0.1:<0.25757.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:20:08] [ns_1@127.0.0.1:<0.25700.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:09] [ns_1@127.0.0.1:<0.25702.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:09] [ns_1@127.0.0.1:<0.25776.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:09] [ns_1@127.0.0.1:<0.25696.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25785.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:10] [ns_1@127.0.0.1:<0.25768.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:10] [ns_1@127.0.0.1:<0.25776.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:10] [ns_1@127.0.0.1:<0.25720.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:11] [ns_1@127.0.0.1:<0.25722.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:11] [ns_1@127.0.0.1:<0.25776.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:11] [ns_1@127.0.0.1:<0.25715.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:12] [ns_1@127.0.0.1:<0.25789.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:12] [ns_1@127.0.0.1:<0.25776.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:12] [ns_1@127.0.0.1:<0.25733.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:13] [ns_1@127.0.0.1:<0.25738.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:20:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25776.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:20:13] [ns_1@127.0.0.1:<0.25727.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:14] [ns_1@127.0.0.1:<0.25802.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:14] [ns_1@127.0.0.1:<0.25748.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25785.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25819.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:15] [ns_1@127.0.0.1:<0.25753.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:15] [ns_1@127.0.0.1:<0.25740.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:16] [ns_1@127.0.0.1:<0.25816.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:16] [ns_1@127.0.0.1:<0.25759.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:17] [ns_1@127.0.0.1:<0.25763.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:17] [ns_1@127.0.0.1:<0.25809.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:17] [ns_1@127.0.0.1:<0.25824.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:17] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:20:17] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:20:17] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:20:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:20:17] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 1:20:22] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to 
delete bucket "default" within expected time. [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:<0.25755.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:<0.25804.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:<0.25869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:20:22] [ns_1@127.0.0.1:<0.25820.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:20:22] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:20:24: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] 
moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 1:20:23] [ns_1@127.0.0.1:<0.25830.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:23] [ns_1@127.0.0.1:<0.25869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:23] [ns_1@127.0.0.1:<0.25765.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:24] [ns_1@127.0.0.1:<0.25786.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:24] [ns_1@127.0.0.1:<0.25869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:24] [ns_1@127.0.0.1:<0.25879.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25819.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25896.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:25] [ns_1@127.0.0.1:<0.25832.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:25] [ns_1@127.0.0.1:<0.25869.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:25] [ns_1@127.0.0.1:<0.25887.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:26] [ns_1@127.0.0.1:<0.25799.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:26] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:20:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25869.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:20:26] [ns_1@127.0.0.1:<0.25897.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:27] [ns_1@127.0.0.1:<0.25835.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:27] [ns_1@127.0.0.1:<0.25904.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:28] [ns_1@127.0.0.1:<0.25811.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:28] [ns_1@127.0.0.1:<0.25910.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:29] [ns_1@127.0.0.1:<0.25838.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:29] [ns_1@127.0.0.1:<0.25916.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:29] [ns_1@127.0.0.1:<0.25926.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25896.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25933.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:30] [ns_1@127.0.0.1:<0.25826.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:30] [ns_1@127.0.0.1:<0.25920.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:30] [ns_1@127.0.0.1:<0.25926.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:31] [ns_1@127.0.0.1:<0.25840.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:31] [ns_1@127.0.0.1:<0.25934.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:31] [ns_1@127.0.0.1:<0.25926.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:32] [ns_1@127.0.0.1:<0.25862.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:32] [ns_1@127.0.0.1:<0.25939.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:32] [ns_1@127.0.0.1:<0.25926.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:33] [ns_1@127.0.0.1:<0.25842.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:33] [ns_1@127.0.0.1:<0.25946.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:33] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:20:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25926.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:20:34] [ns_1@127.0.0.1:<0.25863.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:34] [ns_1@127.0.0.1:<0.25952.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25933.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:34] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25967.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:35] [ns_1@127.0.0.1:<0.25882.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:35] [ns_1@127.0.0.1:<0.25959.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:36] [ns_1@127.0.0.1:<0.25864.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:36] [ns_1@127.0.0.1:<0.25968.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:37] [ns_1@127.0.0.1:<0.25900.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:37] [ns_1@127.0.0.1:<0.25975.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:38] [ns_1@127.0.0.1:<0.25865.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:38] [ns_1@127.0.0.1:<0.25979.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:39] [ns_1@127.0.0.1:<0.25912.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:39] 
[ns_1@127.0.0.1:<0.25994.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:39] [ns_1@127.0.0.1:<0.25985.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25967.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26002.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:40] [ns_1@127.0.0.1:<0.25872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:40] [ns_1@127.0.0.1:<0.25994.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:40] [ns_1@127.0.0.1:<0.25990.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:41] [ns_1@127.0.0.1:<0.25922.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:41] [ns_1@127.0.0.1:<0.25994.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:41] [ns_1@127.0.0.1:<0.26003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:42] [ns_1@127.0.0.1:<0.25890.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:42] [ns_1@127.0.0.1:<0.25994.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:42] [ns_1@127.0.0.1:<0.26008.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:43] [ns_1@127.0.0.1:<0.25941.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:20:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25994.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: 
false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:20:43] [ns_1@127.0.0.1:<0.26016.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:44] [ns_1@127.0.0.1:<0.25907.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:44] [ns_1@127.0.0.1:<0.26021.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26002.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26036.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:45] [ns_1@127.0.0.1:<0.25954.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:45] [ns_1@127.0.0.1:<0.26028.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:46] [ns_1@127.0.0.1:<0.25918.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:46] [ns_1@127.0.0.1:<0.26037.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:47] [ns_1@127.0.0.1:<0.25972.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:47] [ns_1@127.0.0.1:<0.26043.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:48] [ns_1@127.0.0.1:<0.25936.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:48] [ns_1@127.0.0.1:<0.26047.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:49] [ns_1@127.0.0.1:<0.25983.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:49] [ns_1@127.0.0.1:<0.26062.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26036.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:49] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26069.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:49] [ns_1@127.0.0.1:<0.26054.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:50] [ns_1@127.0.0.1:<0.25948.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:50] [ns_1@127.0.0.1:<0.26062.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:51] [ns_1@127.0.0.1:<0.26058.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:51] [ns_1@127.0.0.1:<0.25999.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:51] [ns_1@127.0.0.1:<0.26062.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:52] [ns_1@127.0.0.1:<0.26072.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:52] [ns_1@127.0.0.1:<0.25961.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:52] [ns_1@127.0.0.1:<0.26062.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:53] [ns_1@127.0.0.1:<0.26078.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:53] [ns_1@127.0.0.1:<0.26014.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:20:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26062.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:20:54] [ns_1@127.0.0.1:<0.26085.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:54] [ns_1@127.0.0.1:<0.25977.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:20:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26069.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 1:20:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26103.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:20:55] [ns_1@127.0.0.1:<0.26090.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:55] [ns_1@127.0.0.1:<0.26026.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:56] [ns_1@127.0.0.1:<0.26098.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:56] [ns_1@127.0.0.1:<0.25988.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:57] [ns_1@127.0.0.1:<0.26106.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:57] [ns_1@127.0.0.1:<0.26041.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:58] [ns_1@127.0.0.1:<0.26112.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:58] [ns_1@127.0.0.1:<0.26006.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:59] [ns_1@127.0.0.1:<0.26117.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:20:59] [ns_1@127.0.0.1:<0.26052.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:20:59] [ns_1@127.0.0.1:<0.26145.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:20:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750054,804260}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37550816}, {processes,10323648}, {processes_used,8698168}, {system,27227168}, {atom,1306681}, {atom_used,1284164}, {binary,505640}, {code,12859877}, {ets,1925144}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1076}, {memory_data,{4040077312,4010901504,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 28120 kB\nBuffers: 55964 kB\nCached: 3532796 kB\nSwapCached: 0 kB\nActive: 304308 kB\nInactive: 3444800 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 28120 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 
kB\nWriteback: 0 kB\nAnonPages: 160340 kB\nMapped: 24868 kB\nSlab: 134428 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617583104}, {buffered_memory,57307136}, {free_memory,28794880}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1071540,0}}, {context_switches,{637660,0}}, {garbage_collection,{319996,375042506,0}}, {io,{{input,18099655},{output,20666529}}}, {reductions,{146132829,643023}}, {run_queue,0}, {runtime,{24540,140}}]}]}] [error_logger:error] [2012-03-26 1:20:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26103.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:20:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26152.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:00] [ns_1@127.0.0.1:<0.26123.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:00] [ns_1@127.0.0.1:<0.26019.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:00] [ns_1@127.0.0.1:<0.26145.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:01] [ns_1@127.0.0.1:<0.26140.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:01] [ns_1@127.0.0.1:<0.26070.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:01] [ns_1@127.0.0.1:<0.26145.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:02] [ns_1@127.0.0.1:<0.26155.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:02] [ns_1@127.0.0.1:<0.26033.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:02] [ns_1@127.0.0.1:<0.26145.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:03] [ns_1@127.0.0.1:<0.26160.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:03] [ns_1@127.0.0.1:<0.26083.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 
'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:21:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26145.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:21:04] [ns_1@127.0.0.1:<0.26167.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:04] [ns_1@127.0.0.1:<0.26045.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26152.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:04] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26186.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:05] [ns_1@127.0.0.1:<0.26173.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:09] [ns_1@127.0.0.1:<0.26096.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:09] [ns_1@127.0.0.1:<0.26201.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:09] [ns_1@127.0.0.1:<0.26110.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26186.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26209.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:10] [ns_1@127.0.0.1:<0.26180.0>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:10] [ns_1@127.0.0.1:<0.26201.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:10] [ns_1@127.0.0.1:<0.26056.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:11] [ns_1@127.0.0.1:<0.26206.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:11] [ns_1@127.0.0.1:<0.26201.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:11] [ns_1@127.0.0.1:<0.26121.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:12] [ns_1@127.0.0.1:<0.26213.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:12] [ns_1@127.0.0.1:<0.26201.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:12] [ns_1@127.0.0.1:<0.26076.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:13] [ns_1@127.0.0.1:<0.26221.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:21:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26201.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:21:13] [ns_1@127.0.0.1:<0.26153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:14] [ns_1@127.0.0.1:<0.26226.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:14] [ns_1@127.0.0.1:<0.26088.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26209.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.26243.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:15] [ns_1@127.0.0.1:<0.26233.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:15] [ns_1@127.0.0.1:<0.26165.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:16] [ns_1@127.0.0.1:<0.26240.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:16] [ns_1@127.0.0.1:<0.26104.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:17] [ns_1@127.0.0.1:<0.26248.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:17] [ns_1@127.0.0.1:<0.26178.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:18] [ns_1@127.0.0.1:<0.26252.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:18] [ns_1@127.0.0.1:<0.26115.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:19] [ns_1@127.0.0.1:<0.26259.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:19] [ns_1@127.0.0.1:<0.26269.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26243.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:19] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26276.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:20] [ns_1@127.0.0.1:<0.26210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:20] [ns_1@127.0.0.1:<0.26263.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:20] [ns_1@127.0.0.1:<0.26269.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:21] [ns_1@127.0.0.1:<0.26125.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:21] [ns_1@127.0.0.1:<0.26277.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:21] [ns_1@127.0.0.1:<0.26269.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:22] [ns_1@127.0.0.1:<0.26223.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:22] 
[ns_1@127.0.0.1:<0.26283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:22] [ns_1@127.0.0.1:<0.26269.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:23] [ns_1@127.0.0.1:<0.26158.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26269.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:21:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26276.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26304.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:21:29] [ns_1@127.0.0.1:<0.26312.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26304.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26319.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:21:30] [ns_1@127.0.0.1:<0.26312.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:31] 
[ns_1@127.0.0.1:<0.26312.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:32] [ns_1@127.0.0.1:<0.26312.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:33] [ns_1@127.0.0.1:<0.26171.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:33] [ns_1@127.0.0.1:<0.26290.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:21:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26312.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:21:34] [ns_1@127.0.0.1:<0.26235.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:34] [ns_1@127.0.0.1:<0.26295.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26319.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26341.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:35] [ns_1@127.0.0.1:<0.26187.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:35] [ns_1@127.0.0.1:<0.26333.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:36] [ns_1@127.0.0.1:<0.26250.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:36] [ns_1@127.0.0.1:<0.26326.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:37] [ns_1@127.0.0.1:<0.26195.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:37] 
[ns_1@127.0.0.1:<0.26349.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:38] [ns_1@127.0.0.1:<0.26261.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:38] [ns_1@127.0.0.1:<0.26342.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:39] [ns_1@127.0.0.1:<0.26215.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:39] [ns_1@127.0.0.1:<0.26359.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:39] [ns_1@127.0.0.1:<0.26370.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26341.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26376.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:40] [ns_1@127.0.0.1:<0.26279.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:40] [ns_1@127.0.0.1:<0.26353.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:40] [ns_1@127.0.0.1:<0.26370.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:41] [ns_1@127.0.0.1:<0.26228.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:41] [ns_1@127.0.0.1:<0.26377.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:41] [ns_1@127.0.0.1:<0.26370.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:42] [ns_1@127.0.0.1:<0.26292.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:42] [ns_1@127.0.0.1:<0.26364.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:42] [ns_1@127.0.0.1:<0.26370.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:43] [ns_1@127.0.0.1:<0.26244.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:43] [ns_1@127.0.0.1:<0.26390.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] 
[2012-03-26 1:21:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26370.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:21:44] [ns_1@127.0.0.1:<0.26335.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:44] [ns_1@127.0.0.1:<0.26382.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26376.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26410.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:45] [ns_1@127.0.0.1:<0.26254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:45] [ns_1@127.0.0.1:<0.26402.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:46] [ns_1@127.0.0.1:<0.26351.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:46] [ns_1@127.0.0.1:<0.26395.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:47] [ns_1@127.0.0.1:<0.26265.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:47] [ns_1@127.0.0.1:<0.26417.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:48] [ns_1@127.0.0.1:<0.26361.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:48] [ns_1@127.0.0.1:<0.26411.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:49] [ns_1@127.0.0.1:<0.26285.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:49] [ns_1@127.0.0.1:<0.26436.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:49] [ns_1@127.0.0.1:<0.26428.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[error_logger:error] [2012-03-26 1:21:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26410.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26445.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:50] [ns_1@127.0.0.1:<0.26379.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:50] [ns_1@127.0.0.1:<0.26436.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:50] [ns_1@127.0.0.1:<0.26421.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:51] [ns_1@127.0.0.1:<0.26328.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:51] [ns_1@127.0.0.1:<0.26436.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:51] [ns_1@127.0.0.1:<0.26446.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:52] [ns_1@127.0.0.1:<0.26392.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:52] [ns_1@127.0.0.1:<0.26436.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:52] [ns_1@127.0.0.1:<0.26432.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:53] [ns_1@127.0.0.1:<0.26344.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:21:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26436.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:21:53] [ns_1@127.0.0.1:<0.26459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:21:54] [ns_1@127.0.0.1:<0.26404.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:54] [ns_1@127.0.0.1:<0.26452.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:21:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26445.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:21:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26479.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:21:55] [ns_1@127.0.0.1:<0.26355.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:55] [ns_1@127.0.0.1:<0.26472.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:56] [ns_1@127.0.0.1:<0.26419.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:56] [ns_1@127.0.0.1:<0.26464.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:57] [ns_1@127.0.0.1:<0.26366.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:57] [ns_1@127.0.0.1:<0.26486.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:58] [ns_1@127.0.0.1:<0.26430.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:58] [ns_1@127.0.0.1:<0.26480.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:21:59] [ns_1@127.0.0.1:<0.26384.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:21:59] [ns_1@127.0.0.1:<0.26506.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:21:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750114,832289}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37598880}, {processes,10326648}, {processes_used,8701168}, {system,27272232}, {atom,1306681}, {atom_used,1284164}, {binary,511952}, {code,12859877}, {ets,1957232}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, 
{ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1136}, {memory_data,{4040077312,4011282432,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27872 kB\nBuffers: 56116 kB\nCached: 3532532 kB\nSwapCached: 0 kB\nActive: 304288 kB\nInactive: 3444740 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27872 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160364 kB\nMapped: 24868 kB\nSlab: 134424 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617312768}, {buffered_memory,57462784}, {free_memory,28540928}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1131568,0}}, {context_switches,{649828,0}}, {garbage_collection,{326973,384538708,0}}, {io,{{input,18375900},{output,21233421}}}, {reductions,{148538311,639325}}, {run_queue,0}, {runtime,{25060,130}}]}]}] [stats:error] [2012-03-26 1:21:59] [ns_1@127.0.0.1:<0.26497.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26479.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26515.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:00] [ns_1@127.0.0.1:<0.26449.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:00] [ns_1@127.0.0.1:<0.26506.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:00] [ns_1@127.0.0.1:<0.26491.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:01] [ns_1@127.0.0.1:<0.26397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:01] [ns_1@127.0.0.1:<0.26506.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:01] [ns_1@127.0.0.1:<0.26516.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:02] [ns_1@127.0.0.1:<0.26462.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:02] 
[ns_1@127.0.0.1:<0.26506.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:02] [ns_1@127.0.0.1:<0.26501.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:03] [ns_1@127.0.0.1:<0.26413.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:22:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26506.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:22:04] [ns_1@127.0.0.1:<0.26528.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:04] [ns_1@127.0.0.1:<0.26476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26515.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26547.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:05] [ns_1@127.0.0.1:<0.26521.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:05] [ns_1@127.0.0.1:<0.26425.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:06] [ns_1@127.0.0.1:<0.26541.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:06] [ns_1@127.0.0.1:<0.26489.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:07] [ns_1@127.0.0.1:<0.26534.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:07] [ns_1@127.0.0.1:<0.26439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:08] 
[ns_1@127.0.0.1:<0.26557.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:08] [ns_1@127.0.0.1:<0.26499.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:09] [ns_1@127.0.0.1:<0.26550.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:09] [ns_1@127.0.0.1:<0.26457.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:09] [ns_1@127.0.0.1:<0.26578.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26547.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26584.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:10] [ns_1@127.0.0.1:<0.26567.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:10] [ns_1@127.0.0.1:<0.26519.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:10] [ns_1@127.0.0.1:<0.26578.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:11] [ns_1@127.0.0.1:<0.26561.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:11] [ns_1@127.0.0.1:<0.26470.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:11] [ns_1@127.0.0.1:<0.26578.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:12] [ns_1@127.0.0.1:<0.26587.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:12] [ns_1@127.0.0.1:<0.26532.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:12] [ns_1@127.0.0.1:<0.26578.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:13] [ns_1@127.0.0.1:<0.26572.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:13] [ns_1@127.0.0.1:<0.26484.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:22:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26578.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:22:14] [ns_1@127.0.0.1:<0.26600.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:14] [ns_1@127.0.0.1:<0.26548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26584.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26618.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:15] [ns_1@127.0.0.1:<0.26592.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:15] [ns_1@127.0.0.1:<0.26495.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:16] [ns_1@127.0.0.1:<0.26612.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:16] [ns_1@127.0.0.1:<0.26559.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:17] [ns_1@127.0.0.1:<0.26605.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:17] [ns_1@127.0.0.1:<0.26512.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:18] [ns_1@127.0.0.1:<0.26627.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:18] [ns_1@127.0.0.1:<0.26570.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:19] [ns_1@127.0.0.1:<0.26621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:19] [ns_1@127.0.0.1:<0.26526.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:19] [ns_1@127.0.0.1:<0.26646.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26618.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26653.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:20] [ns_1@127.0.0.1:<0.26638.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:20] [ns_1@127.0.0.1:<0.26590.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:20] [ns_1@127.0.0.1:<0.26646.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:21] [ns_1@127.0.0.1:<0.26631.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:21] [ns_1@127.0.0.1:<0.26539.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:21] [ns_1@127.0.0.1:<0.26646.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:22] [ns_1@127.0.0.1:<0.26656.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:22] [ns_1@127.0.0.1:<0.26603.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:22] [ns_1@127.0.0.1:<0.26646.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:23] [ns_1@127.0.0.1:<0.26642.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:22:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26646.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:22:23] [ns_1@127.0.0.1:<0.26555.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:24] [ns_1@127.0.0.1:<0.26669.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:24] [ns_1@127.0.0.1:<0.26619.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26653.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26687.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:25] [ns_1@127.0.0.1:<0.26662.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:25] [ns_1@127.0.0.1:<0.26565.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:26] [ns_1@127.0.0.1:<0.26682.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:26] [ns_1@127.0.0.1:<0.26629.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:27] [ns_1@127.0.0.1:<0.26677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:27] [ns_1@127.0.0.1:<0.26585.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:28] [ns_1@127.0.0.1:<0.26697.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:28] [ns_1@127.0.0.1:<0.26640.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:29] [ns_1@127.0.0.1:<0.26692.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:29] [ns_1@127.0.0.1:<0.26713.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:29] [ns_1@127.0.0.1:<0.26598.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26687.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26722.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:30] [ns_1@127.0.0.1:<0.26707.0>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:30] [ns_1@127.0.0.1:<0.26713.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:30] [ns_1@127.0.0.1:<0.26660.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:31] [ns_1@127.0.0.1:<0.26703.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:31] [ns_1@127.0.0.1:<0.26713.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:31] [ns_1@127.0.0.1:<0.26610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:32] [ns_1@127.0.0.1:<0.26726.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:32] [ns_1@127.0.0.1:<0.26713.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:32] [ns_1@127.0.0.1:<0.26672.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:33] [ns_1@127.0.0.1:<0.26719.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:33] [ns_1@127.0.0.1:<0.26733.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:33] [ns_1@127.0.0.1:<0.26625.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:22:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26713.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:22:33] [ns_1@127.0.0.1:<0.26636.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:34] [ns_1@127.0.0.1:<0.26739.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:34] [ns_1@127.0.0.1:<0.26688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26722.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26760.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:35] [ns_1@127.0.0.1:<0.26750.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:35] [ns_1@127.0.0.1:<0.26654.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:36] [ns_1@127.0.0.1:<0.26757.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:36] [ns_1@127.0.0.1:<0.26699.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:37] [ns_1@127.0.0.1:<0.26766.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:38] [ns_1@127.0.0.1:<0.26667.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:38] [ns_1@127.0.0.1:<0.26770.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:39] [ns_1@127.0.0.1:<0.26709.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:39] [ns_1@127.0.0.1:<0.26776.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:39] [ns_1@127.0.0.1:<0.26787.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26760.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26793.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:40] [ns_1@127.0.0.1:<0.26680.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:40] [ns_1@127.0.0.1:<0.26781.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:40] [ns_1@127.0.0.1:<0.26787.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:41] [ns_1@127.0.0.1:<0.26728.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:41] [ns_1@127.0.0.1:<0.26794.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:41] 
[ns_1@127.0.0.1:<0.26787.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:42] [ns_1@127.0.0.1:<0.26694.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:42] [ns_1@127.0.0.1:<0.26799.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:42] [ns_1@127.0.0.1:<0.26787.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:43] [ns_1@127.0.0.1:<0.26741.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:43] [ns_1@127.0.0.1:<0.26807.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:22:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26787.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:22:44] [ns_1@127.0.0.1:<0.26705.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:44] [ns_1@127.0.0.1:<0.26812.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26793.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26827.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:45] [ns_1@127.0.0.1:<0.26743.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:45] [ns_1@127.0.0.1:<0.26819.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:46] [ns_1@127.0.0.1:<0.26723.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:46] 
[ns_1@127.0.0.1:<0.26828.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:47] [ns_1@127.0.0.1:<0.26745.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:47] [ns_1@127.0.0.1:<0.26834.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:48] [ns_1@127.0.0.1:<0.26735.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:48] [ns_1@127.0.0.1:<0.26838.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:49] [ns_1@127.0.0.1:<0.26761.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:49] [ns_1@127.0.0.1:<0.26845.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:49] [ns_1@127.0.0.1:<0.26855.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26827.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26862.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:50] [ns_1@127.0.0.1:<0.26752.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:50] [ns_1@127.0.0.1:<0.26849.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:50] [ns_1@127.0.0.1:<0.26855.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:51] [ns_1@127.0.0.1:<0.26772.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:51] [ns_1@127.0.0.1:<0.26863.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:51] [ns_1@127.0.0.1:<0.26855.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:52] [ns_1@127.0.0.1:<0.26768.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:52] [ns_1@127.0.0.1:<0.26869.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:52] [ns_1@127.0.0.1:<0.26855.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:53] [ns_1@127.0.0.1:<0.26783.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:53] [ns_1@127.0.0.1:<0.26876.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:22:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:22:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26855.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:22:54] [ns_1@127.0.0.1:<0.26778.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:54] [ns_1@127.0.0.1:<0.26881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:22:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26862.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:22:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26896.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:22:55] [ns_1@127.0.0.1:<0.26801.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:55] [ns_1@127.0.0.1:<0.26889.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:56] [ns_1@127.0.0.1:<0.26796.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:56] [ns_1@127.0.0.1:<0.26897.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:57] [ns_1@127.0.0.1:<0.26814.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:57] [ns_1@127.0.0.1:<0.26903.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:58] [ns_1@127.0.0.1:<0.26809.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:58] [ns_1@127.0.0.1:<0.26908.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:22:59] [ns_1@127.0.0.1:<0.26830.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:22:59] [ns_1@127.0.0.1:<0.26923.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:22:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750174,857219}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37546488}, {processes,10248840}, {processes_used,8623360}, {system,27297648}, {atom,1306681}, {atom_used,1284164}, {binary,502104}, {code,12859877}, {ets,1985928}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1196}, {memory_data,{4040077312,4011536384,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27872 kB\nBuffers: 56184 kB\nCached: 3532696 kB\nSwapCached: 0 kB\nActive: 304408 kB\nInactive: 3444836 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27872 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160372 kB\nMapped: 24868 kB\nSlab: 134424 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617480704}, {buffered_memory,57532416}, {free_memory,28540928}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1191593,0}}, {context_switches,{662847,0}}, {garbage_collection,{334217,395733990,0}}, {io,{{input,18406498},{output,21675160}}}, {reductions,{151137123,655197}}, {run_queue,0}, {runtime,{25660,140}}]}]}] [stats:error] [2012-03-26 1:22:59] [ns_1@127.0.0.1:<0.26914.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26896.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26932.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:00] [ns_1@127.0.0.1:<0.26821.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:00] [ns_1@127.0.0.1:<0.26923.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:00] [ns_1@127.0.0.1:<0.26918.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:01] [ns_1@127.0.0.1:<0.26840.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:01] [ns_1@127.0.0.1:<0.26923.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:01] [ns_1@127.0.0.1:<0.26933.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:02] [ns_1@127.0.0.1:<0.26836.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:02] [ns_1@127.0.0.1:<0.26923.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:02] [ns_1@127.0.0.1:<0.26938.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:03] [ns_1@127.0.0.1:<0.26851.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:23:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26923.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:23:03] [ns_1@127.0.0.1:<0.26945.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:04] [ns_1@127.0.0.1:<0.26847.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:04] [ns_1@127.0.0.1:<0.26951.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26932.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 
1:23:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26966.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:05] [ns_1@127.0.0.1:<0.26871.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:05] [ns_1@127.0.0.1:<0.26958.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:06] [ns_1@127.0.0.1:<0.26865.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:06] [ns_1@127.0.0.1:<0.26967.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:07] [ns_1@127.0.0.1:<0.26883.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:07] [ns_1@127.0.0.1:<0.26974.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:08] [ns_1@127.0.0.1:<0.26878.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:08] [ns_1@127.0.0.1:<0.26978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:09] [ns_1@127.0.0.1:<0.26899.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:09] [ns_1@127.0.0.1:<0.26995.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:09] [ns_1@127.0.0.1:<0.26984.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26966.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27003.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:10] [ns_1@127.0.0.1:<0.26891.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:10] [ns_1@127.0.0.1:<0.26995.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:10] [ns_1@127.0.0.1:<0.26989.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:11] [ns_1@127.0.0.1:<0.26912.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:11] [ns_1@127.0.0.1:<0.26995.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:11] 
[ns_1@127.0.0.1:<0.27004.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:12] [ns_1@127.0.0.1:<0.26905.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:12] [ns_1@127.0.0.1:<0.26995.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:13] [ns_1@127.0.0.1:<0.27009.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:13] [ns_1@127.0.0.1:<0.26926.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:23:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26995.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:23:14] [ns_1@127.0.0.1:<0.27017.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:14] [ns_1@127.0.0.1:<0.26916.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27003.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27035.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:15] [ns_1@127.0.0.1:<0.27022.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:15] [ns_1@127.0.0.1:<0.26943.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:16] [ns_1@127.0.0.1:<0.27029.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:16] [ns_1@127.0.0.1:<0.26936.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:17] 
[ns_1@127.0.0.1:<0.27038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:17] [ns_1@127.0.0.1:<0.26956.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:18] [ns_1@127.0.0.1:<0.27044.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:18] [ns_1@127.0.0.1:<0.26949.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:19] [ns_1@127.0.0.1:<0.27048.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:19] [ns_1@127.0.0.1:<0.26972.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:19] [ns_1@127.0.0.1:<0.27063.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27035.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27070.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:20] [ns_1@127.0.0.1:<0.27055.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:20] [ns_1@127.0.0.1:<0.26963.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:20] [ns_1@127.0.0.1:<0.27063.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:21] [ns_1@127.0.0.1:<0.27059.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:21] [ns_1@127.0.0.1:<0.26982.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:21] [ns_1@127.0.0.1:<0.27063.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:22] [ns_1@127.0.0.1:<0.27073.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:22] [ns_1@127.0.0.1:<0.26976.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:22] [ns_1@127.0.0.1:<0.27063.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:23] [ns_1@127.0.0.1:<0.27079.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:23] [ns_1@127.0.0.1:<0.27000.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:23:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27063.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:23:24] [ns_1@127.0.0.1:<0.27086.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:24] [ns_1@127.0.0.1:<0.26987.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27070.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27104.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:25] [ns_1@127.0.0.1:<0.27091.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:25] [ns_1@127.0.0.1:<0.27015.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:26] [ns_1@127.0.0.1:<0.27099.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:26] [ns_1@127.0.0.1:<0.27007.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:27] [ns_1@127.0.0.1:<0.27107.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:27] [ns_1@127.0.0.1:<0.27027.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:28] [ns_1@127.0.0.1:<0.27113.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:28] [ns_1@127.0.0.1:<0.27020.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:29] [ns_1@127.0.0.1:<0.27118.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:29] [ns_1@127.0.0.1:<0.27042.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:23:29] [ns_1@127.0.0.1:<0.27132.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27104.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27139.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:30] [ns_1@127.0.0.1:<0.27124.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:30] [ns_1@127.0.0.1:<0.27036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:30] [ns_1@127.0.0.1:<0.27132.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:31] [ns_1@127.0.0.1:<0.27128.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:31] [ns_1@127.0.0.1:<0.27053.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:31] [ns_1@127.0.0.1:<0.27132.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:32] [ns_1@127.0.0.1:<0.27142.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:32] [ns_1@127.0.0.1:<0.27132.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:32] [ns_1@127.0.0.1:<0.27046.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:33] [ns_1@127.0.0.1:<0.27147.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:23:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27132.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] 
[2012-03-26 1:23:33] [ns_1@127.0.0.1:<0.27071.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:33] [ns_1@127.0.0.1:<0.27084.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:33] [ns_1@127.0.0.1:<0.27097.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:34] [ns_1@127.0.0.1:<0.27154.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:34] [ns_1@127.0.0.1:<0.27057.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27139.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27177.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:35] [ns_1@127.0.0.1:<0.27163.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:35] [ns_1@127.0.0.1:<0.27111.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:36] [ns_1@127.0.0.1:<0.27171.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:36] [ns_1@127.0.0.1:<0.27077.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:37] [ns_1@127.0.0.1:<0.27165.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:37] [ns_1@127.0.0.1:<0.27122.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:38] [ns_1@127.0.0.1:<0.27187.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:38] [ns_1@127.0.0.1:<0.27089.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:39] [ns_1@127.0.0.1:<0.27167.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:39] [ns_1@127.0.0.1:<0.27204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:39] [ns_1@127.0.0.1:<0.27140.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27177.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27212.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:40] [ns_1@127.0.0.1:<0.27198.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:40] [ns_1@127.0.0.1:<0.27204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:40] [ns_1@127.0.0.1:<0.27105.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:41] [ns_1@127.0.0.1:<0.27183.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:41] [ns_1@127.0.0.1:<0.27204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:41] [ns_1@127.0.0.1:<0.27152.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:42] [ns_1@127.0.0.1:<0.27216.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:42] [ns_1@127.0.0.1:<0.27204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:42] [ns_1@127.0.0.1:<0.27116.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:43] [ns_1@127.0.0.1:<0.27193.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:23:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27204.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:23:43] [ns_1@127.0.0.1:<0.27169.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:44] [ns_1@127.0.0.1:<0.27229.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:44] [ns_1@127.0.0.1:<0.27126.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:45] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27212.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27246.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:45] [ns_1@127.0.0.1:<0.27209.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:45] [ns_1@127.0.0.1:<0.27185.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:46] [ns_1@127.0.0.1:<0.27243.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:47] [ns_1@127.0.0.1:<0.27145.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:47] [ns_1@127.0.0.1:<0.27224.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:48] [ns_1@127.0.0.1:<0.27195.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:48] [ns_1@127.0.0.1:<0.27255.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:49] [ns_1@127.0.0.1:<0.27158.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:49] [ns_1@127.0.0.1:<0.27236.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:49] [ns_1@127.0.0.1:<0.27272.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27246.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27279.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:50] [ns_1@127.0.0.1:<0.27213.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:50] [ns_1@127.0.0.1:<0.27266.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:50] [ns_1@127.0.0.1:<0.27272.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:23:51] [ns_1@127.0.0.1:<0.27178.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:51] [ns_1@127.0.0.1:<0.27251.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:51] [ns_1@127.0.0.1:<0.27272.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:52] [ns_1@127.0.0.1:<0.27226.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:52] [ns_1@127.0.0.1:<0.27286.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:52] [ns_1@127.0.0.1:<0.27272.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:53] [ns_1@127.0.0.1:<0.27189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:53] [ns_1@127.0.0.1:<0.27262.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:23:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27272.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:23:54] [ns_1@127.0.0.1:<0.27238.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:54] [ns_1@127.0.0.1:<0.27298.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:23:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27279.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:23:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27313.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:23:55] [ns_1@127.0.0.1:<0.27200.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:55] 
[ns_1@127.0.0.1:<0.27280.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:56] [ns_1@127.0.0.1:<0.27253.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:56] [ns_1@127.0.0.1:<0.27314.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:57] [ns_1@127.0.0.1:<0.27218.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:57] [ns_1@127.0.0.1:<0.27293.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:58] [ns_1@127.0.0.1:<0.27264.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:58] [ns_1@127.0.0.1:<0.27325.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:59] [ns_1@127.0.0.1:<0.27231.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:23:59] [ns_1@127.0.0.1:<0.27306.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:23:59] [ns_1@127.0.0.1:<0.27357.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:23:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750234,882248}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37690288}, {processes,10414272}, {processes_used,8788792}, {system,27276016}, {atom,1306681}, {atom_used,1284164}, {binary,501584}, {code,12859877}, {ets,1957672}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1256}, {memory_data,{4040077312,4011663360,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27624 kB\nBuffers: 56260 kB\nCached: 3532856 kB\nSwapCached: 0 kB\nActive: 304508 kB\nInactive: 3444976 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27624 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 160384 kB\nMapped: 24868 kB\nSlab: 134412 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617644544}, {buffered_memory,57610240}, {free_memory,28286976}, {total_memory,4040077312}]}, 
{node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1251618,0}}, {context_switches,{675871,0}}, {garbage_collection,{341519,406955414,0}}, {io,{{input,18437096},{output,22117433}}}, {reductions,{153749913,622245}}, {run_queue,0}, {runtime,{26210,130}}]}]}] [error_logger:error] [2012-03-26 1:24:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27313.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27364.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:00] [ns_1@127.0.0.1:<0.27282.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:00] [ns_1@127.0.0.1:<0.27335.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:00] [ns_1@127.0.0.1:<0.27357.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:01] [ns_1@127.0.0.1:<0.27247.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:01] [ns_1@127.0.0.1:<0.27320.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:01] [ns_1@127.0.0.1:<0.27357.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:02] [ns_1@127.0.0.1:<0.27295.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:02] [ns_1@127.0.0.1:<0.27370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:02] [ns_1@127.0.0.1:<0.27357.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:03] [ns_1@127.0.0.1:<0.27257.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:03] [ns_1@127.0.0.1:<0.27331.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:24:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27357.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from 
ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:24:04] [ns_1@127.0.0.1:<0.27308.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:04] [ns_1@127.0.0.1:<0.27383.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27364.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27398.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:05] [ns_1@127.0.0.1:<0.27268.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:05] [ns_1@127.0.0.1:<0.27365.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:06] [ns_1@127.0.0.1:<0.27322.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:06] [ns_1@127.0.0.1:<0.27399.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:07] [ns_1@127.0.0.1:<0.27288.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:07] [ns_1@127.0.0.1:<0.27377.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:08] [ns_1@127.0.0.1:<0.27333.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:08] [ns_1@127.0.0.1:<0.27410.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:09] [ns_1@127.0.0.1:<0.27300.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:09] [ns_1@127.0.0.1:<0.27427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:09] [ns_1@127.0.0.1:<0.27390.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27398.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[error_logger:info] [2012-03-26 1:24:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27435.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:10] [ns_1@127.0.0.1:<0.27367.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:10] [ns_1@127.0.0.1:<0.27427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:10] [ns_1@127.0.0.1:<0.27421.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:11] [ns_1@127.0.0.1:<0.27316.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:11] [ns_1@127.0.0.1:<0.27427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:11] [ns_1@127.0.0.1:<0.27406.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:12] [ns_1@127.0.0.1:<0.27379.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:12] [ns_1@127.0.0.1:<0.27427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:12] [ns_1@127.0.0.1:<0.27441.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:13] [ns_1@127.0.0.1:<0.27327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:24:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27427.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:24:13] [ns_1@127.0.0.1:<0.27416.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:14] [ns_1@127.0.0.1:<0.27392.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:14] [ns_1@127.0.0.1:<0.27454.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: 
{local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27435.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27469.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:15] [ns_1@127.0.0.1:<0.27352.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:15] [ns_1@127.0.0.1:<0.27436.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:16] [ns_1@127.0.0.1:<0.27408.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:16] [ns_1@127.0.0.1:<0.27470.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:17] [ns_1@127.0.0.1:<0.27372.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:17] [ns_1@127.0.0.1:<0.27449.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:18] [ns_1@127.0.0.1:<0.27418.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:18] [ns_1@127.0.0.1:<0.27480.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:19] [ns_1@127.0.0.1:<0.27385.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:19] [ns_1@127.0.0.1:<0.27495.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:19] [ns_1@127.0.0.1:<0.27461.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27469.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27504.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:20] [ns_1@127.0.0.1:<0.27439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:20] [ns_1@127.0.0.1:<0.27495.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:20] [ns_1@127.0.0.1:<0.27491.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:24:21] [ns_1@127.0.0.1:<0.27401.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:21] [ns_1@127.0.0.1:<0.27495.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:21] [ns_1@127.0.0.1:<0.27476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:22] [ns_1@127.0.0.1:<0.27452.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:22] [ns_1@127.0.0.1:<0.27495.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:23] [ns_1@127.0.0.1:<0.27511.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:23] [ns_1@127.0.0.1:<0.27414.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:24:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27495.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:24:24] [ns_1@127.0.0.1:<0.27487.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:24] [ns_1@127.0.0.1:<0.27464.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27504.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27536.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:25] [ns_1@127.0.0.1:<0.27523.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:25] [ns_1@127.0.0.1:<0.27430.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:26] 
[ns_1@127.0.0.1:<0.27505.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:26] [ns_1@127.0.0.1:<0.27478.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:27] [ns_1@127.0.0.1:<0.27539.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:27] [ns_1@127.0.0.1:<0.27447.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:28] [ns_1@127.0.0.1:<0.27518.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:28] [ns_1@127.0.0.1:<0.27489.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:29] [ns_1@127.0.0.1:<0.27550.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:29] [ns_1@127.0.0.1:<0.27459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:29] [ns_1@127.0.0.1:<0.27564.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27536.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27571.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:30] [ns_1@127.0.0.1:<0.27531.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:30] [ns_1@127.0.0.1:<0.27509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:30] [ns_1@127.0.0.1:<0.27564.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:31] [ns_1@127.0.0.1:<0.27560.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:31] [ns_1@127.0.0.1:<0.27474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:31] [ns_1@127.0.0.1:<0.27564.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:32] [ns_1@127.0.0.1:<0.27545.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:32] [ns_1@127.0.0.1:<0.27521.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:32] [ns_1@127.0.0.1:<0.27564.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:33] [ns_1@127.0.0.1:<0.27579.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:33] [ns_1@127.0.0.1:<0.27485.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:24:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27564.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:24:34] [ns_1@127.0.0.1:<0.27556.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:34] [ns_1@127.0.0.1:<0.27574.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:34] [ns_1@127.0.0.1:<0.27586.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:34] [ns_1@127.0.0.1:<0.27537.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27571.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27609.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:35] [ns_1@127.0.0.1:<0.27592.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:35] [ns_1@127.0.0.1:<0.27501.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:36] [ns_1@127.0.0.1:<0.27603.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:36] [ns_1@127.0.0.1:<0.27548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:37] [ns_1@127.0.0.1:<0.27612.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:37] [ns_1@127.0.0.1:<0.27516.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:24:38] [ns_1@127.0.0.1:<0.27619.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:38] [ns_1@127.0.0.1:<0.27558.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:39] [ns_1@127.0.0.1:<0.27623.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:39] [ns_1@127.0.0.1:<0.27529.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:39] [ns_1@127.0.0.1:<0.27638.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27609.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27644.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:40] [ns_1@127.0.0.1:<0.27629.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:40] [ns_1@127.0.0.1:<0.27577.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:40] [ns_1@127.0.0.1:<0.27638.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:41] [ns_1@127.0.0.1:<0.27634.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:41] [ns_1@127.0.0.1:<0.27638.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:41] [ns_1@127.0.0.1:<0.27543.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:42] [ns_1@127.0.0.1:<0.27647.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:42] [ns_1@127.0.0.1:<0.27638.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:42] [ns_1@127.0.0.1:<0.27590.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:43] [ns_1@127.0.0.1:<0.27655.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:24:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27638.0> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:24:43] [ns_1@127.0.0.1:<0.27554.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:44] [ns_1@127.0.0.1:<0.27661.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:44] [ns_1@127.0.0.1:<0.27610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27644.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27678.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:45] [ns_1@127.0.0.1:<0.27668.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:45] [ns_1@127.0.0.1:<0.27572.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:46] [ns_1@127.0.0.1:<0.27673.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:46] [ns_1@127.0.0.1:<0.27621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:47] [ns_1@127.0.0.1:<0.27683.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:47] [ns_1@127.0.0.1:<0.27584.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:48] [ns_1@127.0.0.1:<0.27687.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:48] [ns_1@127.0.0.1:<0.27632.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:49] [ns_1@127.0.0.1:<0.27694.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:49] [ns_1@127.0.0.1:<0.27704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:49] [ns_1@127.0.0.1:<0.27597.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27678.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27713.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:50] [ns_1@127.0.0.1:<0.27698.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:50] [ns_1@127.0.0.1:<0.27704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:50] [ns_1@127.0.0.1:<0.27650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:51] [ns_1@127.0.0.1:<0.27707.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:51] [ns_1@127.0.0.1:<0.27704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:51] [ns_1@127.0.0.1:<0.27599.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:52] [ns_1@127.0.0.1:<0.27718.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:52] [ns_1@127.0.0.1:<0.27704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:52] [ns_1@127.0.0.1:<0.27663.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:53] [ns_1@127.0.0.1:<0.27725.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:24:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27704.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:24:53] [ns_1@127.0.0.1:<0.27601.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:54] [ns_1@127.0.0.1:<0.27730.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:54] [ns_1@127.0.0.1:<0.27679.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:24:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27713.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:24:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:24:55] [ns_1@127.0.0.1:<0.27738.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:55] [ns_1@127.0.0.1:<0.27617.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:56] [ns_1@127.0.0.1:<0.27744.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:57] [ns_1@127.0.0.1:<0.27689.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:57] [ns_1@127.0.0.1:<0.27752.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:58] [ns_1@127.0.0.1:<0.27627.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:58] [ns_1@127.0.0.1:<0.27757.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:59] [ns_1@127.0.0.1:<0.27700.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:24:59] [ns_1@127.0.0.1:<0.27763.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:24:59] [ns_1@127.0.0.1:<0.27774.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:24:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750294,910447}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37467624}, {processes,10149248}, {processes_used,8523768}, {system,27318376}, {atom,1306681}, {atom_used,1284164}, {binary,506288}, {code,12859877}, {ets,1988936}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, 
{system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1316}, {memory_data,{4040077312,4011790336,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27128 kB\nBuffers: 56368 kB\nCached: 3532996 kB\nSwapCached: 0 kB\nActive: 304572 kB\nInactive: 3445192 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27128 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160400 kB\nMapped: 24868 kB\nSlab: 134404 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617787904}, {buffered_memory,57720832}, {free_memory,27779072}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1311646,0}}, {context_switches,{689272,0}}, {garbage_collection,{348863,418327054,0}}, {io,{{input,18713720},{output,22786182}}}, {reductions,{156408426,641059}}, {run_queue,0}, {runtime,{26800,170}}]}]}] [error_logger:error] [2012-03-26 1:25:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27781.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:00] [ns_1@127.0.0.1:<0.27645.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:00] [ns_1@127.0.0.1:<0.27767.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:00] [ns_1@127.0.0.1:<0.27774.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:01] [ns_1@127.0.0.1:<0.27720.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:01] [ns_1@127.0.0.1:<0.27782.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:01] [ns_1@127.0.0.1:<0.27774.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:02] [ns_1@127.0.0.1:<0.27658.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:02] [ns_1@127.0.0.1:<0.27787.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:02] [ns_1@127.0.0.1:<0.27774.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:25:03] [ns_1@127.0.0.1:<0.27732.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:03] [ns_1@127.0.0.1:<0.27794.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:25:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27774.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:25:04] [ns_1@127.0.0.1:<0.27670.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:04] [ns_1@127.0.0.1:<0.27800.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27781.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27815.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:05] [ns_1@127.0.0.1:<0.27748.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:05] [ns_1@127.0.0.1:<0.27807.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:06] [ns_1@127.0.0.1:<0.27685.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:06] [ns_1@127.0.0.1:<0.27816.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:07] [ns_1@127.0.0.1:<0.27759.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:07] [ns_1@127.0.0.1:<0.27823.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:08] [ns_1@127.0.0.1:<0.27696.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:08] 
[ns_1@127.0.0.1:<0.27827.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:09] [ns_1@127.0.0.1:<0.27769.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:09] [ns_1@127.0.0.1:<0.27833.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:09] [ns_1@127.0.0.1:<0.27846.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27815.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27852.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:10] [ns_1@127.0.0.1:<0.27714.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:10] [ns_1@127.0.0.1:<0.27838.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:10] [ns_1@127.0.0.1:<0.27846.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:11] [ns_1@127.0.0.1:<0.27789.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:11] [ns_1@127.0.0.1:<0.27853.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:11] [ns_1@127.0.0.1:<0.27846.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:12] [ns_1@127.0.0.1:<0.27727.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:12] [ns_1@127.0.0.1:<0.27858.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:12] [ns_1@127.0.0.1:<0.27846.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:13] [ns_1@127.0.0.1:<0.27802.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:13] [ns_1@127.0.0.1:<0.27866.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:25:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27846.0> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:25:14] [ns_1@127.0.0.1:<0.27740.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:14] [ns_1@127.0.0.1:<0.27871.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27852.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27886.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:15] [ns_1@127.0.0.1:<0.27818.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:15] [ns_1@127.0.0.1:<0.27878.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:16] [ns_1@127.0.0.1:<0.27754.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:16] [ns_1@127.0.0.1:<0.27887.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:17] [ns_1@127.0.0.1:<0.27829.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:17] [ns_1@127.0.0.1:<0.27893.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:18] [ns_1@127.0.0.1:<0.27765.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:18] [ns_1@127.0.0.1:<0.27897.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:19] [ns_1@127.0.0.1:<0.27840.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:19] [ns_1@127.0.0.1:<0.27912.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:19] [ns_1@127.0.0.1:<0.27904.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27886.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27921.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:20] [ns_1@127.0.0.1:<0.27784.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:20] [ns_1@127.0.0.1:<0.27912.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:20] [ns_1@127.0.0.1:<0.27908.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:21] [ns_1@127.0.0.1:<0.27860.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:21] [ns_1@127.0.0.1:<0.27912.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:21] [ns_1@127.0.0.1:<0.27922.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:22] [ns_1@127.0.0.1:<0.27796.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:22] [ns_1@127.0.0.1:<0.27912.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:22] [ns_1@127.0.0.1:<0.27928.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:23] [ns_1@127.0.0.1:<0.27873.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:25:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27912.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:25:23] [ns_1@127.0.0.1:<0.27935.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:24] [ns_1@127.0.0.1:<0.27809.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:24] [ns_1@127.0.0.1:<0.27940.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[error_logger:error] [2012-03-26 1:25:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27921.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27955.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:25] [ns_1@127.0.0.1:<0.27889.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:25] [ns_1@127.0.0.1:<0.27948.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:26] [ns_1@127.0.0.1:<0.27825.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:26] [ns_1@127.0.0.1:<0.27956.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:27] [ns_1@127.0.0.1:<0.27901.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:27] [ns_1@127.0.0.1:<0.27962.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:28] [ns_1@127.0.0.1:<0.27835.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:28] [ns_1@127.0.0.1:<0.27967.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:29] [ns_1@127.0.0.1:<0.27915.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:29] [ns_1@127.0.0.1:<0.27981.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:29] [ns_1@127.0.0.1:<0.27973.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27955.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27990.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:30] [ns_1@127.0.0.1:<0.27855.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:30] [ns_1@127.0.0.1:<0.27981.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:30] [ns_1@127.0.0.1:<0.27977.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:31] [ns_1@127.0.0.1:<0.27933.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:31] [ns_1@127.0.0.1:<0.27981.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:31] [ns_1@127.0.0.1:<0.27991.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:32] [ns_1@127.0.0.1:<0.27868.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:32] [ns_1@127.0.0.1:<0.27981.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:33] [ns_1@127.0.0.1:<0.27996.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:33] [ns_1@127.0.0.1:<0.27946.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:25:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27981.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:25:34] [ns_1@127.0.0.1:<0.28003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:34] [ns_1@127.0.0.1:<0.27880.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:34] [ns_1@127.0.0.1:<0.27895.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:34] [ns_1@127.0.0.1:<0.27906.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:34] [ns_1@127.0.0.1:<0.27926.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:34] [ns_1@127.0.0.1:<0.27938.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27990.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28030.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:35] [ns_1@127.0.0.1:<0.28009.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:35] [ns_1@127.0.0.1:<0.27960.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:36] [ns_1@127.0.0.1:<0.28016.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:36] [ns_1@127.0.0.1:<0.27950.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:37] [ns_1@127.0.0.1:<0.28033.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:37] [ns_1@127.0.0.1:<0.27971.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:38] [ns_1@127.0.0.1:<0.28018.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:38] [ns_1@127.0.0.1:<0.27965.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:39] [ns_1@127.0.0.1:<0.28044.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:39] [ns_1@127.0.0.1:<0.27987.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:39] [ns_1@127.0.0.1:<0.28059.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28030.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28065.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:40] [ns_1@127.0.0.1:<0.28020.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:40] [ns_1@127.0.0.1:<0.27975.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:40] [ns_1@127.0.0.1:<0.28059.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:41] [ns_1@127.0.0.1:<0.28055.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:41] [ns_1@127.0.0.1:<0.28001.0>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:41] [ns_1@127.0.0.1:<0.28059.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:42] [ns_1@127.0.0.1:<0.28022.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:42] [ns_1@127.0.0.1:<0.27994.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:42] [ns_1@127.0.0.1:<0.28059.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:43] [ns_1@127.0.0.1:<0.28073.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:43] [ns_1@127.0.0.1:<0.28014.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:25:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28059.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:25:44] [ns_1@127.0.0.1:<0.28024.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:44] [ns_1@127.0.0.1:<0.28007.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28065.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28099.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:45] [ns_1@127.0.0.1:<0.28086.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:45] [ns_1@127.0.0.1:<0.28038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:46] [ns_1@127.0.0.1:<0.28040.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:46] [ns_1@127.0.0.1:<0.28031.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:47] [ns_1@127.0.0.1:<0.28102.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:47] [ns_1@127.0.0.1:<0.28048.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:48] [ns_1@127.0.0.1:<0.28050.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:48] [ns_1@127.0.0.1:<0.28042.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:49] [ns_1@127.0.0.1:<0.28112.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:49] [ns_1@127.0.0.1:<0.28066.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:49] [ns_1@127.0.0.1:<0.28127.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28099.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28134.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:50] [ns_1@127.0.0.1:<0.28068.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:50] [ns_1@127.0.0.1:<0.28127.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:50] [ns_1@127.0.0.1:<0.28053.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:51] [ns_1@127.0.0.1:<0.28123.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:51] [ns_1@127.0.0.1:<0.28127.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:51] [ns_1@127.0.0.1:<0.28079.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:52] [ns_1@127.0.0.1:<0.28081.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:52] [ns_1@127.0.0.1:<0.28127.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:52] [ns_1@127.0.0.1:<0.28071.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:53] [ns_1@127.0.0.1:<0.28146.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:53] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:25:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28127.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:25:53] [ns_1@127.0.0.1:<0.28091.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:54] [ns_1@127.0.0.1:<0.28093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:54] [ns_1@127.0.0.1:<0.28084.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:25:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28134.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:25:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28168.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:25:55] [ns_1@127.0.0.1:<0.28159.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:55] [ns_1@127.0.0.1:<0.28106.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:56] [ns_1@127.0.0.1:<0.28108.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:56] [ns_1@127.0.0.1:<0.28100.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:57] [ns_1@127.0.0.1:<0.28173.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:57] [ns_1@127.0.0.1:<0.28117.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:58] [ns_1@127.0.0.1:<0.28119.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:58] [ns_1@127.0.0.1:<0.28110.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:25:59] 
[ns_1@127.0.0.1:<0.28184.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:25:59] [ns_1@127.0.0.1:<0.28195.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:25:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750354,936245}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37511792}, {processes,10219136}, {processes_used,8593656}, {system,27292656}, {atom,1306681}, {atom_used,1284164}, {binary,501416}, {code,12859877}, {ets,1960720}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1376}, {memory_data,{4040077312,4012298240,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27004 kB\nBuffers: 56448 kB\nCached: 3533160 kB\nSwapCached: 0 kB\nActive: 304696 kB\nInactive: 3445336 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27004 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 160416 kB\nMapped: 24868 kB\nSlab: 134436 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579892 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617955840}, {buffered_memory,57802752}, {free_memory,27652096}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1371672,0}}, {context_switches,{702382,0}}, {garbage_collection,{356077,429716212,0}}, {io,{{input,18744318},{output,23232005}}}, {reductions,{159027035,652783}}, {run_queue,0}, {runtime,{27380,160}}]}]}] [stats:error] [2012-03-26 1:25:59] [ns_1@127.0.0.1:<0.28135.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28168.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,menelaus_sup} started: [{pid,<0.28204.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:00] [ns_1@127.0.0.1:<0.28138.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:00] [ns_1@127.0.0.1:<0.28195.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:00] [ns_1@127.0.0.1:<0.28121.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:01] [ns_1@127.0.0.1:<0.28199.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:01] [ns_1@127.0.0.1:<0.28195.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:01] [ns_1@127.0.0.1:<0.28148.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:02] [ns_1@127.0.0.1:<0.28151.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:02] [ns_1@127.0.0.1:<0.28195.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:02] [ns_1@127.0.0.1:<0.28141.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:03] [ns_1@127.0.0.1:<0.28215.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:26:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28195.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:26:03] [ns_1@127.0.0.1:<0.28161.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:04] [ns_1@127.0.0.1:<0.28163.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:04] [ns_1@127.0.0.1:<0.28153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28204.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28238.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:05] [ns_1@127.0.0.1:<0.28228.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:06] [ns_1@127.0.0.1:<0.28175.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:06] [ns_1@127.0.0.1:<0.28178.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:07] [ns_1@127.0.0.1:<0.28169.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:07] [ns_1@127.0.0.1:<0.28244.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:08] [ns_1@127.0.0.1:<0.28186.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:08] [ns_1@127.0.0.1:<0.28188.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:09] [ns_1@127.0.0.1:<0.28180.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:09] [ns_1@127.0.0.1:<0.28254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:09] [ns_1@127.0.0.1:<0.28267.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28238.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28273.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:10] [ns_1@127.0.0.1:<0.28205.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:10] [ns_1@127.0.0.1:<0.28208.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:10] [ns_1@127.0.0.1:<0.28267.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:11] [ns_1@127.0.0.1:<0.28190.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:11] [ns_1@127.0.0.1:<0.28274.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:11] 
[ns_1@127.0.0.1:<0.28267.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:12] [ns_1@127.0.0.1:<0.28217.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:12] [ns_1@127.0.0.1:<0.28221.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:12] [ns_1@127.0.0.1:<0.28267.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:13] [ns_1@127.0.0.1:<0.28210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:13] [ns_1@127.0.0.1:<0.28287.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:26:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28267.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:26:14] [ns_1@127.0.0.1:<0.28230.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:14] [ns_1@127.0.0.1:<0.28235.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28273.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28307.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:15] [ns_1@127.0.0.1:<0.28223.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:15] [ns_1@127.0.0.1:<0.28299.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:16] [ns_1@127.0.0.1:<0.28246.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:16] 
[ns_1@127.0.0.1:<0.28248.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:17] [ns_1@127.0.0.1:<0.28239.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:17] [ns_1@127.0.0.1:<0.28314.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:18] [ns_1@127.0.0.1:<0.28256.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:18] [ns_1@127.0.0.1:<0.28259.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:19] [ns_1@127.0.0.1:<0.28250.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:19] [ns_1@127.0.0.1:<0.28325.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:19] [ns_1@127.0.0.1:<0.28335.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28307.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28342.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:20] [ns_1@127.0.0.1:<0.28276.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:20] [ns_1@127.0.0.1:<0.28279.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:20] [ns_1@127.0.0.1:<0.28335.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:21] [ns_1@127.0.0.1:<0.28261.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:21] [ns_1@127.0.0.1:<0.28343.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:21] [ns_1@127.0.0.1:<0.28335.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:22] [ns_1@127.0.0.1:<0.28289.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:22] [ns_1@127.0.0.1:<0.28292.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:22] [ns_1@127.0.0.1:<0.28335.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:23] [ns_1@127.0.0.1:<0.28281.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:23] [ns_1@127.0.0.1:<0.28356.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:26:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28335.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:26:24] [ns_1@127.0.0.1:<0.28301.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:24] [ns_1@127.0.0.1:<0.28308.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28342.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28376.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:25] [ns_1@127.0.0.1:<0.28294.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:25] [ns_1@127.0.0.1:<0.28369.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:26] [ns_1@127.0.0.1:<0.28316.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:26] [ns_1@127.0.0.1:<0.28318.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:27] [ns_1@127.0.0.1:<0.28310.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:27] [ns_1@127.0.0.1:<0.28383.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:28] [ns_1@127.0.0.1:<0.28327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:28] [ns_1@127.0.0.1:<0.28329.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:29] [ns_1@127.0.0.1:<0.28320.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
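The supervisor, crash, and orchestrator reports above all share one failure shape: a gen_server:call to the locally registered name 'ns_memcached-default' ('topkeys' from hot_keys_keeper, 'list_vbuckets_prevstate' from ns_janitor:cleanup/2) exits with {noproc,...} because no process is registered under that name while ns_janitor is still "Waiting for "default" on ['ns_1@127.0.0.1']". A minimal sketch of that mechanism, using a hypothetical helper module that is not part of ns_server:

    %% noproc_sketch.erl -- illustrative only, not ns_server source.
    %% gen_server:call/3 to a registered name that does not exist exits with
    %% {noproc,{gen_server,call,[Name,Request,Timeout]}}, the same reason seen
    %% in the SUPERVISOR and CRASH reports in this log.
    -module(noproc_sketch).
    -export([call_bucket/2]).

    call_bucket(Bucket, Request) ->
        Name = list_to_atom("ns_memcached-" ++ Bucket),  %% e.g. 'ns_memcached-default'
        case whereis(Name) of
            undefined -> {error, {not_running, Name}};   %% avoids the noproc exit
            _Pid      -> gen_server:call(Name, Request, 30000)  %% same 30s timeout as logged
        end.

Until a process is registered as 'ns_memcached-default' again, every such call keeps exiting the same way, which is consistent with menelaus_sup restarting hot_keys_keeper roughly every five seconds and the orchestrator logging a janitor exit for bucket "default" roughly every ten seconds in the entries above and below.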
[ns_server:info] [2012-03-26 1:26:29] [ns_1@127.0.0.1:<0.28402.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:29] [ns_1@127.0.0.1:<0.28394.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28376.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28411.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:30] [ns_1@127.0.0.1:<0.28345.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:30] [ns_1@127.0.0.1:<0.28402.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:30] [ns_1@127.0.0.1:<0.28349.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:31] [ns_1@127.0.0.1:<0.28331.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:31] [ns_1@127.0.0.1:<0.28402.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:31] [ns_1@127.0.0.1:<0.28412.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:32] [ns_1@127.0.0.1:<0.28358.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:32] [ns_1@127.0.0.1:<0.28402.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:32] [ns_1@127.0.0.1:<0.28361.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:33] [ns_1@127.0.0.1:<0.28351.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:26:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28402.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:26:33] [ns_1@127.0.0.1:<0.28424.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:34] [ns_1@127.0.0.1:<0.28371.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:34] [ns_1@127.0.0.1:<0.28377.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:34] [ns_1@127.0.0.1:<0.28417.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:34] [ns_1@127.0.0.1:<0.28430.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:34] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:26:34] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:26:34] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:26:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:26:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 1:26:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:<0.28363.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:<0.28415.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:<0.28481.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [stats:error] [2012-03-26 1:26:39] [ns_1@127.0.0.1:<0.28428.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:26:39] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, 
{ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:26:40] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:26:40] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:26:40] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:error] [2012-03-26 1:26:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28411.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28492.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:26:40] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:26:41: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 1:26:40] [ns_1@127.0.0.1:<0.28439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:40] [ns_1@127.0.0.1:<0.28481.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:41] [ns_1@127.0.0.1:<0.28381.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:41] [ns_1@127.0.0.1:<0.28392.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:41] [ns_1@127.0.0.1:<0.28481.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:42] [ns_1@127.0.0.1:<0.28493.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:42] [ns_1@127.0.0.1:<0.28441.0>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:42] [ns_1@127.0.0.1:<0.28481.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:43] [ns_1@127.0.0.1:<0.28498.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:43] [ns_1@127.0.0.1:<0.28406.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:26:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28481.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:26:44] [ns_1@127.0.0.1:<0.28506.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:44] [ns_1@127.0.0.1:<0.28444.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28492.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28524.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:45] [ns_1@127.0.0.1:<0.28511.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:45] [ns_1@127.0.0.1:<0.28422.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:46] [ns_1@127.0.0.1:<0.28518.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:46] [ns_1@127.0.0.1:<0.28447.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:47] [ns_1@127.0.0.1:<0.28527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:47] [ns_1@127.0.0.1:<0.28435.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:48] [ns_1@127.0.0.1:<0.28533.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:48] [ns_1@127.0.0.1:<0.28449.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:49] [ns_1@127.0.0.1:<0.28537.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:49] [ns_1@127.0.0.1:<0.28474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:49] [ns_1@127.0.0.1:<0.28552.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28524.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28559.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:50] [ns_1@127.0.0.1:<0.28544.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:50] [ns_1@127.0.0.1:<0.28454.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:50] [ns_1@127.0.0.1:<0.28552.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:51] [ns_1@127.0.0.1:<0.28548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:51] [ns_1@127.0.0.1:<0.28475.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:51] [ns_1@127.0.0.1:<0.28552.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:52] [ns_1@127.0.0.1:<0.28562.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:52] [ns_1@127.0.0.1:<0.28496.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:52] [ns_1@127.0.0.1:<0.28552.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:53] [ns_1@127.0.0.1:<0.28568.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:53] [ns_1@127.0.0.1:<0.28476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:26:53] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28552.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:26:54] [ns_1@127.0.0.1:<0.28575.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:54] [ns_1@127.0.0.1:<0.28509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:26:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28559.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:26:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28593.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:26:55] [ns_1@127.0.0.1:<0.28580.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:55] [ns_1@127.0.0.1:<0.28477.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:56] [ns_1@127.0.0.1:<0.28588.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:56] [ns_1@127.0.0.1:<0.28525.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:57] [ns_1@127.0.0.1:<0.28596.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:57] [ns_1@127.0.0.1:<0.28484.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:58] [ns_1@127.0.0.1:<0.28602.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:58] [ns_1@127.0.0.1:<0.28535.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:26:59] [ns_1@127.0.0.1:<0.28607.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:26:59] [ns_1@127.0.0.1:<0.28633.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:26:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750414,964369}}, 
{outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37756464}, {processes,10396576}, {processes_used,8771096}, {system,27359888}, {atom,1306681}, {atom_used,1284164}, {binary,532840}, {code,12859877}, {ets,1989648}]}, {system_stats, [{cpu_utilization_rate,25.628140703517587}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1436}, {memory_data,{4040077312,4012425216,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26756 kB\nBuffers: 56552 kB\nCached: 3533324 kB\nSwapCached: 0 kB\nActive: 304824 kB\nInactive: 3445500 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26756 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160440 kB\nMapped: 24868 kB\nSlab: 134472 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618123776}, {buffered_memory,57909248}, {free_memory,27398144}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1431701,1}}, {context_switches,{715610,0}}, {garbage_collection,{363177,441272779,0}}, {io,{{input,18780602},{output,23712993}}}, {reductions,{161638119,658160}}, {run_queue,0}, {runtime,{28020,180}}]}]}] [stats:error] [2012-03-26 1:26:59] [ns_1@127.0.0.1:<0.28504.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28593.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28642.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:00] [ns_1@127.0.0.1:<0.28613.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:00] [ns_1@127.0.0.1:<0.28633.0>:ns_janitor:wait_for_memcached:278] 
Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:00] [ns_1@127.0.0.1:<0.28546.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:01] [ns_1@127.0.0.1:<0.28636.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:01] [ns_1@127.0.0.1:<0.28633.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:01] [ns_1@127.0.0.1:<0.28516.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:02] [ns_1@127.0.0.1:<0.28646.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:02] [ns_1@127.0.0.1:<0.28633.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:02] [ns_1@127.0.0.1:<0.28566.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:03] [ns_1@127.0.0.1:<0.28653.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:27:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28633.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:27:03] [ns_1@127.0.0.1:<0.28531.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:04] [ns_1@127.0.0.1:<0.28659.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:04] [ns_1@127.0.0.1:<0.28578.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28642.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28676.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 1:27:05] [ns_1@127.0.0.1:<0.28666.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:05] [ns_1@127.0.0.1:<0.28542.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:06] [ns_1@127.0.0.1:<0.28670.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:06] [ns_1@127.0.0.1:<0.28594.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:07] [ns_1@127.0.0.1:<0.28682.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:07] [ns_1@127.0.0.1:<0.28560.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:08] [ns_1@127.0.0.1:<0.28686.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:08] [ns_1@127.0.0.1:<0.28605.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:09] [ns_1@127.0.0.1:<0.28692.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:09] [ns_1@127.0.0.1:<0.28705.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:09] [ns_1@127.0.0.1:<0.28573.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28676.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28713.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:10] [ns_1@127.0.0.1:<0.28697.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:10] [ns_1@127.0.0.1:<0.28705.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:10] [ns_1@127.0.0.1:<0.28615.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:11] [ns_1@127.0.0.1:<0.28708.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:11] [ns_1@127.0.0.1:<0.28705.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:11] [ns_1@127.0.0.1:<0.28586.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:12] [ns_1@127.0.0.1:<0.28717.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:12] 
[ns_1@127.0.0.1:<0.28705.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:12] [ns_1@127.0.0.1:<0.28648.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:13] [ns_1@127.0.0.1:<0.28725.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:27:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28705.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:27:13] [ns_1@127.0.0.1:<0.28600.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:14] [ns_1@127.0.0.1:<0.28730.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:15] [ns_1@127.0.0.1:<0.28661.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28713.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:15] [ns_1@127.0.0.1:<0.28737.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:16] [ns_1@127.0.0.1:<0.28611.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:16] [ns_1@127.0.0.1:<0.28744.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:17] [ns_1@127.0.0.1:<0.28677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:17] [ns_1@127.0.0.1:<0.28752.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:18] 
[ns_1@127.0.0.1:<0.28643.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:18] [ns_1@127.0.0.1:<0.28756.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:19] [ns_1@127.0.0.1:<0.28688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:19] [ns_1@127.0.0.1:<0.28763.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:19] [ns_1@127.0.0.1:<0.28773.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28747.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28780.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:20] [ns_1@127.0.0.1:<0.28655.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:20] [ns_1@127.0.0.1:<0.28767.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:20] [ns_1@127.0.0.1:<0.28773.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:21] [ns_1@127.0.0.1:<0.28699.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:21] [ns_1@127.0.0.1:<0.28781.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:21] [ns_1@127.0.0.1:<0.28773.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:22] [ns_1@127.0.0.1:<0.28668.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:22] [ns_1@127.0.0.1:<0.28787.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:22] [ns_1@127.0.0.1:<0.28773.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:23] [ns_1@127.0.0.1:<0.28719.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:23] [ns_1@127.0.0.1:<0.28794.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:27:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28773.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:27:24] [ns_1@127.0.0.1:<0.28684.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:24] [ns_1@127.0.0.1:<0.28799.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28780.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28814.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:25] [ns_1@127.0.0.1:<0.28732.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:25] [ns_1@127.0.0.1:<0.28807.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:26] [ns_1@127.0.0.1:<0.28694.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:26] [ns_1@127.0.0.1:<0.28815.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:27] [ns_1@127.0.0.1:<0.28748.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:27] [ns_1@127.0.0.1:<0.28821.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:28] [ns_1@127.0.0.1:<0.28714.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:28] [ns_1@127.0.0.1:<0.28826.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:29] [ns_1@127.0.0.1:<0.28758.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:29] [ns_1@127.0.0.1:<0.28832.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:29] [ns_1@127.0.0.1:<0.28842.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28814.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28849.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:30] [ns_1@127.0.0.1:<0.28727.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:30] [ns_1@127.0.0.1:<0.28836.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:30] [ns_1@127.0.0.1:<0.28842.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:31] [ns_1@127.0.0.1:<0.28769.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:31] [ns_1@127.0.0.1:<0.28850.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:31] [ns_1@127.0.0.1:<0.28842.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:32] [ns_1@127.0.0.1:<0.28739.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:32] [ns_1@127.0.0.1:<0.28855.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:32] [ns_1@127.0.0.1:<0.28842.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:33] [ns_1@127.0.0.1:<0.28789.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:33] [ns_1@127.0.0.1:<0.28862.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:27:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28842.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:27:34] [ns_1@127.0.0.1:<0.28754.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:34] [ns_1@127.0.0.1:<0.28868.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28849.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28883.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:35] [ns_1@127.0.0.1:<0.28801.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:35] [ns_1@127.0.0.1:<0.28875.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:36] [ns_1@127.0.0.1:<0.28765.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:36] [ns_1@127.0.0.1:<0.28884.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:37] [ns_1@127.0.0.1:<0.28817.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:37] [ns_1@127.0.0.1:<0.28891.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:38] [ns_1@127.0.0.1:<0.28783.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:38] [ns_1@127.0.0.1:<0.28895.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:39] [ns_1@127.0.0.1:<0.28828.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:39] [ns_1@127.0.0.1:<0.28910.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:39] [ns_1@127.0.0.1:<0.28901.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28883.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28918.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:40] [ns_1@127.0.0.1:<0.28796.0>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:40] [ns_1@127.0.0.1:<0.28910.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:41] [ns_1@127.0.0.1:<0.28910.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:42] [ns_1@127.0.0.1:<0.28910.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:27:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28910.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:27:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28918.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28934.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:27:49] [ns_1@127.0.0.1:<0.28942.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28934.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28949.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:50] [ns_1@127.0.0.1:<0.28809.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 1:27:50] [ns_1@127.0.0.1:<0.28942.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:50] [ns_1@127.0.0.1:<0.28906.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:51] [ns_1@127.0.0.1:<0.28838.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:51] [ns_1@127.0.0.1:<0.28942.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:51] [ns_1@127.0.0.1:<0.28919.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:52] [ns_1@127.0.0.1:<0.28823.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:52] [ns_1@127.0.0.1:<0.28942.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:52] [ns_1@127.0.0.1:<0.28956.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:53] [ns_1@127.0.0.1:<0.28857.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:27:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28942.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:27:53] [ns_1@127.0.0.1:<0.28950.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:54] [ns_1@127.0.0.1:<0.28834.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:54] [ns_1@127.0.0.1:<0.28968.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:27:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28949.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:27:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28983.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:27:55] [ns_1@127.0.0.1:<0.28870.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:55] [ns_1@127.0.0.1:<0.28963.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:56] [ns_1@127.0.0.1:<0.28852.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:56] [ns_1@127.0.0.1:<0.28984.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:57] [ns_1@127.0.0.1:<0.28888.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:57] [ns_1@127.0.0.1:<0.28976.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:58] [ns_1@127.0.0.1:<0.28864.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:59] [ns_1@127.0.0.1:<0.28995.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:27:59] [ns_1@127.0.0.1:<0.28899.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:27:59] [ns_1@127.0.0.1:<0.29010.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:27:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750474,992209}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37494784}, {processes,10099712}, {processes_used,8474232}, {system,27395072}, {atom,1306681}, {atom_used,1284164}, {binary,530064}, {code,12859877}, {ets,2021240}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1496}, {memory_data,{4040077312,4012679168,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26508 kB\nBuffers: 56696 kB\nCached: 3533064 kB\nSwapCached: 0 kB\nActive: 304868 kB\nInactive: 3445348 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26508 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 48 kB\nWriteback: 0 kB\nAnonPages: 160472 kB\nMapped: 24868 kB\nSlab: 134432 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, 
{total_swap,6140452864}, {cached_memory,3617857536}, {buffered_memory,58056704}, {free_memory,27144192}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1491729,0}}, {context_switches,{728103,0}}, {garbage_collection,{370315,451258029,0}}, {io,{{input,19056910},{output,24348227}}}, {reductions,{164152176,442982}}, {run_queue,0}, {runtime,{28590,120}}]}]}] [stats:error] [2012-03-26 1:28:00] [ns_1@127.0.0.1:<0.28990.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28983.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29019.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:00] [ns_1@127.0.0.1:<0.28877.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:00] [ns_1@127.0.0.1:<0.29010.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:01] [ns_1@127.0.0.1:<0.29005.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:01] [ns_1@127.0.0.1:<0.28913.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:01] [ns_1@127.0.0.1:<0.29010.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:02] [ns_1@127.0.0.1:<0.29001.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:02] [ns_1@127.0.0.1:<0.28893.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:02] [ns_1@127.0.0.1:<0.29010.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:03] [ns_1@127.0.0.1:<0.29025.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:03] [ns_1@127.0.0.1:<0.28961.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:28:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29010.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in 
function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:28:04] [ns_1@127.0.0.1:<0.29020.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:04] [ns_1@127.0.0.1:<0.28904.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29019.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29051.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:05] [ns_1@127.0.0.1:<0.29038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:05] [ns_1@127.0.0.1:<0.28974.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:06] [ns_1@127.0.0.1:<0.29032.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:06] [ns_1@127.0.0.1:<0.28954.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:07] [ns_1@127.0.0.1:<0.29054.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:07] [ns_1@127.0.0.1:<0.28988.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:08] [ns_1@127.0.0.1:<0.29045.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:08] [ns_1@127.0.0.1:<0.28966.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:09] [ns_1@127.0.0.1:<0.29065.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:09] [ns_1@127.0.0.1:<0.28999.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:09] [ns_1@127.0.0.1:<0.29082.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29051.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29088.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:10] [ns_1@127.0.0.1:<0.29061.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:10] [ns_1@127.0.0.1:<0.28978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:10] [ns_1@127.0.0.1:<0.29082.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:11] [ns_1@127.0.0.1:<0.29076.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:11] [ns_1@127.0.0.1:<0.29016.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:11] [ns_1@127.0.0.1:<0.29082.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:12] [ns_1@127.0.0.1:<0.29071.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:12] [ns_1@127.0.0.1:<0.28993.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:12] [ns_1@127.0.0.1:<0.29082.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:13] [ns_1@127.0.0.1:<0.29096.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:13] [ns_1@127.0.0.1:<0.29030.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:28:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29082.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:28:14] [ns_1@127.0.0.1:<0.29091.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:14] [ns_1@127.0.0.1:<0.29003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:15] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29088.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29122.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:15] [ns_1@127.0.0.1:<0.29109.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:15] [ns_1@127.0.0.1:<0.29043.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:16] [ns_1@127.0.0.1:<0.29104.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:16] [ns_1@127.0.0.1:<0.29023.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:17] [ns_1@127.0.0.1:<0.29125.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:17] [ns_1@127.0.0.1:<0.29059.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:18] [ns_1@127.0.0.1:<0.29116.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:18] [ns_1@127.0.0.1:<0.29036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:19] [ns_1@127.0.0.1:<0.29135.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:19] [ns_1@127.0.0.1:<0.29148.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:19] [ns_1@127.0.0.1:<0.29069.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29122.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29157.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:20] [ns_1@127.0.0.1:<0.29131.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:20] [ns_1@127.0.0.1:<0.29148.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:28:20] [ns_1@127.0.0.1:<0.29052.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:21] [ns_1@127.0.0.1:<0.29151.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:21] [ns_1@127.0.0.1:<0.29148.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:21] [ns_1@127.0.0.1:<0.29089.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:22] [ns_1@127.0.0.1:<0.29142.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:22] [ns_1@127.0.0.1:<0.29148.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:22] [ns_1@127.0.0.1:<0.29063.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:23] [ns_1@127.0.0.1:<0.29169.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:28:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29148.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:28:23] [ns_1@127.0.0.1:<0.29102.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:24] [ns_1@127.0.0.1:<0.29161.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:24] [ns_1@127.0.0.1:<0.29074.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29157.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29191.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:25] 
[ns_1@127.0.0.1:<0.29182.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:25] [ns_1@127.0.0.1:<0.29114.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:26] [ns_1@127.0.0.1:<0.29174.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:26] [ns_1@127.0.0.1:<0.29094.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:27] [ns_1@127.0.0.1:<0.29196.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:27] [ns_1@127.0.0.1:<0.29129.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:28] [ns_1@127.0.0.1:<0.29186.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:28] [ns_1@127.0.0.1:<0.29107.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:29] [ns_1@127.0.0.1:<0.29207.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:29] [ns_1@127.0.0.1:<0.29217.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:29] [ns_1@127.0.0.1:<0.29140.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29191.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29226.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:30] [ns_1@127.0.0.1:<0.29201.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:30] [ns_1@127.0.0.1:<0.29217.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:30] [ns_1@127.0.0.1:<0.29123.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:31] [ns_1@127.0.0.1:<0.29221.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:31] [ns_1@127.0.0.1:<0.29217.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:31] [ns_1@127.0.0.1:<0.29158.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:32] [ns_1@127.0.0.1:<0.29211.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:32] [ns_1@127.0.0.1:<0.29217.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:32] [ns_1@127.0.0.1:<0.29133.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:33] [ns_1@127.0.0.1:<0.29237.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:28:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29217.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:28:33] [ns_1@127.0.0.1:<0.29171.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:34] [ns_1@127.0.0.1:<0.29230.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:35] [ns_1@127.0.0.1:<0.29144.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29226.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29260.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:35] [ns_1@127.0.0.1:<0.29250.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:36] [ns_1@127.0.0.1:<0.29184.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:36] [ns_1@127.0.0.1:<0.29243.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:37] [ns_1@127.0.0.1:<0.29164.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:37] [ns_1@127.0.0.1:<0.29266.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:38] [ns_1@127.0.0.1:<0.29198.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:28:38] [ns_1@127.0.0.1:<0.29254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:39] [ns_1@127.0.0.1:<0.29176.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:39] [ns_1@127.0.0.1:<0.29276.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:39] [ns_1@127.0.0.1:<0.29287.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29260.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29293.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:40] [ns_1@127.0.0.1:<0.29209.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:40] [ns_1@127.0.0.1:<0.29270.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:40] [ns_1@127.0.0.1:<0.29287.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:41] [ns_1@127.0.0.1:<0.29192.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:41] [ns_1@127.0.0.1:<0.29294.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:41] [ns_1@127.0.0.1:<0.29287.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:42] [ns_1@127.0.0.1:<0.29227.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:42] [ns_1@127.0.0.1:<0.29281.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:42] [ns_1@127.0.0.1:<0.29287.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:43] [ns_1@127.0.0.1:<0.29203.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:43] [ns_1@127.0.0.1:<0.29307.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:28:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29287.0> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:28:44] [ns_1@127.0.0.1:<0.29239.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:44] [ns_1@127.0.0.1:<0.29299.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29293.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29327.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:45] [ns_1@127.0.0.1:<0.29213.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:45] [ns_1@127.0.0.1:<0.29319.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:46] [ns_1@127.0.0.1:<0.29252.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:46] [ns_1@127.0.0.1:<0.29312.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:47] [ns_1@127.0.0.1:<0.29232.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:47] [ns_1@127.0.0.1:<0.29334.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:48] [ns_1@127.0.0.1:<0.29268.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:48] [ns_1@127.0.0.1:<0.29328.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:49] [ns_1@127.0.0.1:<0.29245.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:49] [ns_1@127.0.0.1:<0.29345.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:49] [ns_1@127.0.0.1:<0.29355.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29327.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29362.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:50] [ns_1@127.0.0.1:<0.29278.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:50] [ns_1@127.0.0.1:<0.29338.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:50] [ns_1@127.0.0.1:<0.29349.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:50] [ns_1@127.0.0.1:<0.29261.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:50] [ns_1@127.0.0.1:<0.29355.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:51] [ns_1@127.0.0.1:<0.29272.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:51] [ns_1@127.0.0.1:<0.29363.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:51] [ns_1@127.0.0.1:<0.29355.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:52] [ns_1@127.0.0.1:<0.29296.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:52] [ns_1@127.0.0.1:<0.29373.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:52] [ns_1@127.0.0.1:<0.29355.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:53] [ns_1@127.0.0.1:<0.29283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:28:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29355.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:28:53] [ns_1@127.0.0.1:<0.29380.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:54] [ns_1@127.0.0.1:<0.29309.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:54] [ns_1@127.0.0.1:<0.29385.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:28:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29362.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:28:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29400.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:28:55] [ns_1@127.0.0.1:<0.29301.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:55] [ns_1@127.0.0.1:<0.29393.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:56] [ns_1@127.0.0.1:<0.29321.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:56] [ns_1@127.0.0.1:<0.29401.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:57] [ns_1@127.0.0.1:<0.29314.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:57] [ns_1@127.0.0.1:<0.29407.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:58] [ns_1@127.0.0.1:<0.29336.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:58] [ns_1@127.0.0.1:<0.29412.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:28:59] [ns_1@127.0.0.1:<0.29330.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:28:59] [ns_1@127.0.0.1:<0.29427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:28:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750535,19583}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37914968}, {processes,10483048}, {processes_used,8857568}, {system,27431920}, {atom,1306681}, {atom_used,1284164}, {binary,531600}, {code,12859877}, {ets,2049808}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, 
{ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1556}, {memory_data,{4040077312,4012933120,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26384 kB\nBuffers: 56792 kB\nCached: 3533220 kB\nSwapCached: 0 kB\nActive: 305012 kB\nInactive: 3445516 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26384 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 160512 kB\nMapped: 24868 kB\nSlab: 134436 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618017280}, {buffered_memory,58155008}, {free_memory,27017216}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1551756,0}}, {context_switches,{741039,0}}, {garbage_collection,{377588,462383669,0}}, {io,{{input,19087499},{output,24785678}}}, {reductions,{166748892,644376}}, {run_queue,0}, {runtime,{29160,130}}]}]}] [stats:error] [2012-03-26 1:28:59] [ns_1@127.0.0.1:<0.29418.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29400.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29436.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:00] [ns_1@127.0.0.1:<0.29347.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:00] [ns_1@127.0.0.1:<0.29427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:00] [ns_1@127.0.0.1:<0.29422.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:01] [ns_1@127.0.0.1:<0.29340.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:01] [ns_1@127.0.0.1:<0.29427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:01] [ns_1@127.0.0.1:<0.29437.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:02] [ns_1@127.0.0.1:<0.29365.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:29:02] [ns_1@127.0.0.1:<0.29427.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:02] [ns_1@127.0.0.1:<0.29442.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:03] [ns_1@127.0.0.1:<0.29351.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:29:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29427.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:29:03] [ns_1@127.0.0.1:<0.29449.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:04] [ns_1@127.0.0.1:<0.29367.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:04] [ns_1@127.0.0.1:<0.29455.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29436.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29470.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:05] [ns_1@127.0.0.1:<0.29375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:05] [ns_1@127.0.0.1:<0.29462.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:06] [ns_1@127.0.0.1:<0.29369.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:06] [ns_1@127.0.0.1:<0.29471.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:07] [ns_1@127.0.0.1:<0.29390.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:29:07] [ns_1@127.0.0.1:<0.29478.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:08] [ns_1@127.0.0.1:<0.29382.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:08] [ns_1@127.0.0.1:<0.29482.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:09] [ns_1@127.0.0.1:<0.29405.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:09] [ns_1@127.0.0.1:<0.29499.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:10] [ns_1@127.0.0.1:<0.29488.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29470.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29507.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:10] [ns_1@127.0.0.1:<0.29395.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:10] [ns_1@127.0.0.1:<0.29499.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:11] [ns_1@127.0.0.1:<0.29493.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:11] [ns_1@127.0.0.1:<0.29416.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:11] [ns_1@127.0.0.1:<0.29499.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:12] [ns_1@127.0.0.1:<0.29508.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:12] [ns_1@127.0.0.1:<0.29410.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:12] [ns_1@127.0.0.1:<0.29499.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:13] [ns_1@127.0.0.1:<0.29513.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:13] [ns_1@127.0.0.1:<0.29431.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:29:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29499.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:29:14] [ns_1@127.0.0.1:<0.29521.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:14] [ns_1@127.0.0.1:<0.29420.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29507.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29539.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:15] [ns_1@127.0.0.1:<0.29526.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:15] [ns_1@127.0.0.1:<0.29447.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:16] [ns_1@127.0.0.1:<0.29533.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:16] [ns_1@127.0.0.1:<0.29440.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:17] [ns_1@127.0.0.1:<0.29542.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:17] [ns_1@127.0.0.1:<0.29460.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:18] [ns_1@127.0.0.1:<0.29548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:18] [ns_1@127.0.0.1:<0.29453.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:19] [ns_1@127.0.0.1:<0.29552.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:19] [ns_1@127.0.0.1:<0.29476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:19] [ns_1@127.0.0.1:<0.29567.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29539.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29574.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:20] [ns_1@127.0.0.1:<0.29559.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:20] [ns_1@127.0.0.1:<0.29464.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:20] [ns_1@127.0.0.1:<0.29567.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:21] [ns_1@127.0.0.1:<0.29563.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:21] [ns_1@127.0.0.1:<0.29486.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:21] [ns_1@127.0.0.1:<0.29567.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:22] [ns_1@127.0.0.1:<0.29577.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:22] [ns_1@127.0.0.1:<0.29480.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:22] [ns_1@127.0.0.1:<0.29567.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:23] [ns_1@127.0.0.1:<0.29583.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:23] [ns_1@127.0.0.1:<0.29502.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:29:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29567.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:29:24] [ns_1@127.0.0.1:<0.29590.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:24] [ns_1@127.0.0.1:<0.29491.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29574.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29608.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:25] [ns_1@127.0.0.1:<0.29595.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:25] [ns_1@127.0.0.1:<0.29519.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:26] [ns_1@127.0.0.1:<0.29603.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:26] [ns_1@127.0.0.1:<0.29511.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:27] [ns_1@127.0.0.1:<0.29611.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:27] [ns_1@127.0.0.1:<0.29531.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:28] [ns_1@127.0.0.1:<0.29617.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:28] [ns_1@127.0.0.1:<0.29524.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:29] [ns_1@127.0.0.1:<0.29622.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:29] [ns_1@127.0.0.1:<0.29634.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:29] [ns_1@127.0.0.1:<0.29546.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29608.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29643.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:30] [ns_1@127.0.0.1:<0.29628.0>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:30] [ns_1@127.0.0.1:<0.29634.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:30] [ns_1@127.0.0.1:<0.29540.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:31] [ns_1@127.0.0.1:<0.29637.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:31] [ns_1@127.0.0.1:<0.29634.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:31] [ns_1@127.0.0.1:<0.29557.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:32] [ns_1@127.0.0.1:<0.29647.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:32] [ns_1@127.0.0.1:<0.29634.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:32] [ns_1@127.0.0.1:<0.29550.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:33] [ns_1@127.0.0.1:<0.29654.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:29:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29634.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:29:33] [ns_1@127.0.0.1:<0.29575.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:34] [ns_1@127.0.0.1:<0.29660.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:34] [ns_1@127.0.0.1:<0.29561.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29643.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.29677.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:35] [ns_1@127.0.0.1:<0.29667.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:35] [ns_1@127.0.0.1:<0.29588.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:36] [ns_1@127.0.0.1:<0.29671.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:36] [ns_1@127.0.0.1:<0.29581.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:37] [ns_1@127.0.0.1:<0.29683.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:37] [ns_1@127.0.0.1:<0.29601.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:38] [ns_1@127.0.0.1:<0.29687.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:38] [ns_1@127.0.0.1:<0.29593.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:39] [ns_1@127.0.0.1:<0.29693.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:39] [ns_1@127.0.0.1:<0.29704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:39] [ns_1@127.0.0.1:<0.29615.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29677.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29712.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:40] [ns_1@127.0.0.1:<0.29698.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:40] [ns_1@127.0.0.1:<0.29704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:40] [ns_1@127.0.0.1:<0.29609.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:41] [ns_1@127.0.0.1:<0.29707.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:41] [ns_1@127.0.0.1:<0.29704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:41] [ns_1@127.0.0.1:<0.29626.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:42] 
[ns_1@127.0.0.1:<0.29716.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:42] [ns_1@127.0.0.1:<0.29704.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:42] [ns_1@127.0.0.1:<0.29620.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:43] [ns_1@127.0.0.1:<0.29724.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:29:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29704.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:29:43] [ns_1@127.0.0.1:<0.29644.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:44] [ns_1@127.0.0.1:<0.29729.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:45] [ns_1@127.0.0.1:<0.29630.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29712.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29746.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:45] [ns_1@127.0.0.1:<0.29736.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:46] [ns_1@127.0.0.1:<0.29656.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:46] [ns_1@127.0.0.1:<0.29741.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:47] [ns_1@127.0.0.1:<0.29649.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:47] 
[ns_1@127.0.0.1:<0.29751.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:48] [ns_1@127.0.0.1:<0.29669.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:48] [ns_1@127.0.0.1:<0.29755.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:49] [ns_1@127.0.0.1:<0.29662.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:49] [ns_1@127.0.0.1:<0.29762.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:49] [ns_1@127.0.0.1:<0.29772.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29746.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29780.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:50] [ns_1@127.0.0.1:<0.29685.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:50] [ns_1@127.0.0.1:<0.29766.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:50] [ns_1@127.0.0.1:<0.29772.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:51] [ns_1@127.0.0.1:<0.29678.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:51] [ns_1@127.0.0.1:<0.29689.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:51] [ns_1@127.0.0.1:<0.29700.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:51] [ns_1@127.0.0.1:<0.29778.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:51] [ns_1@127.0.0.1:<0.29772.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:52] [ns_1@127.0.0.1:<0.29695.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:52] [ns_1@127.0.0.1:<0.29786.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:52] [ns_1@127.0.0.1:<0.29772.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:53] [ns_1@127.0.0.1:<0.29718.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:53] [ns_1@127.0.0.1:<0.29797.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:29:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29772.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:29:54] [ns_1@127.0.0.1:<0.29713.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:54] [ns_1@127.0.0.1:<0.29788.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:29:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29780.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:29:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29817.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:29:55] [ns_1@127.0.0.1:<0.29731.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:55] [ns_1@127.0.0.1:<0.29810.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:56] [ns_1@127.0.0.1:<0.29726.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:56] [ns_1@127.0.0.1:<0.29790.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:57] [ns_1@127.0.0.1:<0.29747.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:57] [ns_1@127.0.0.1:<0.29824.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:58] [ns_1@127.0.0.1:<0.29738.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:58] [ns_1@127.0.0.1:<0.29802.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:29:59] [ns_1@127.0.0.1:<0.29757.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
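The once-per-second "Waiting for \"default\"" lines from ns_janitor:wait_for_memcached suggest a poll-and-retry loop that gives up after a handful of attempts, after which the janitor proceeds and hits the noproc exit shown in the subsequent crash report. A rough sketch of that shape, assuming a simple whereis/1 readiness check (the module wait_demo and the check itself are illustrative guesses, not the actual ns_janitor implementation, which is not shown in this log):

%% wait_demo.erl -- illustrative only; sketches a poll-until-ready loop with a
%% bounded number of one-second retries, matching the cadence of the
%% "Waiting for ..." lines above.
-module(wait_demo).
-export([wait_for_bucket/2]).

wait_for_bucket(_Bucket, 0) ->
    {error, timeout};
wait_for_bucket(Bucket, AttemptsLeft) when AttemptsLeft > 0 ->
    Name = list_to_atom("ns_memcached-" ++ Bucket),   %% e.g. 'ns_memcached-default'
    case whereis(Name) of
        undefined ->
            timer:sleep(1000),                         %% one-second retry interval
            wait_for_bucket(Bucket, AttemptsLeft - 1);
        Pid when is_pid(Pid) ->
            {ok, Pid}
    end.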
[stats:error] [2012-03-26 1:29:59] [ns_1@127.0.0.1:<0.29835.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:29:59] [ns_1@127.0.0.1:<0.29861.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:29:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750595,46348}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37726072}, {processes,10314600}, {processes_used,8689120}, {system,27411472}, {atom,1306681}, {atom_used,1284164}, {binary,531528}, {code,12859877}, {ets,2021616}]}, {system_stats, [{cpu_utilization_rate,25.75}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1616}, {memory_data,{4040077312,4013060096,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26260 kB\nBuffers: 56888 kB\nCached: 3533376 kB\nSwapCached: 0 kB\nActive: 305132 kB\nInactive: 3445644 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26260 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 36 kB\nWriteback: 0 kB\nAnonPages: 160528 kB\nMapped: 24868 kB\nSlab: 134440 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618177024}, {buffered_memory,58253312}, {free_memory,26890240}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1611783,0}}, {context_switches,{754060,0}}, {garbage_collection,{384841,473569950,0}}, {io,{{input,19118097},{output,25227773}}}, {reductions,{169362399,633686}}, {run_queue,0}, {runtime,{29740,150}}]}]}] [error_logger:error] [2012-03-26 1:30:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29817.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29868.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:00] [ns_1@127.0.0.1:<0.29753.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:00] [ns_1@127.0.0.1:<0.29818.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:00] [ns_1@127.0.0.1:<0.29861.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:01] [ns_1@127.0.0.1:<0.29768.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:01] [ns_1@127.0.0.1:<0.29869.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:01] [ns_1@127.0.0.1:<0.29861.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:02] [ns_1@127.0.0.1:<0.29764.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:02] [ns_1@127.0.0.1:<0.29829.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:02] [ns_1@127.0.0.1:<0.29861.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:03] [ns_1@127.0.0.1:<0.29792.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:30:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29861.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:30:03] [ns_1@127.0.0.1:<0.29881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:04] [ns_1@127.0.0.1:<0.29782.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:04] [ns_1@127.0.0.1:<0.29839.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29868.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 
1:30:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29902.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:05] [ns_1@127.0.0.1:<0.29804.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:05] [ns_1@127.0.0.1:<0.29894.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:06] [ns_1@127.0.0.1:<0.29799.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:06] [ns_1@127.0.0.1:<0.29874.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:07] [ns_1@127.0.0.1:<0.29820.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:07] [ns_1@127.0.0.1:<0.29910.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:08] [ns_1@127.0.0.1:<0.29812.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:08] [ns_1@127.0.0.1:<0.29887.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:09] [ns_1@127.0.0.1:<0.29831.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:09] [ns_1@127.0.0.1:<0.29931.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:09] [ns_1@127.0.0.1:<0.29920.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29902.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29939.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:10] [ns_1@127.0.0.1:<0.29826.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:10] [ns_1@127.0.0.1:<0.29931.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:10] [ns_1@127.0.0.1:<0.29903.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:11] [ns_1@127.0.0.1:<0.29856.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:11] [ns_1@127.0.0.1:<0.29931.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:11] 
[ns_1@127.0.0.1:<0.29940.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:12] [ns_1@127.0.0.1:<0.29837.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:12] [ns_1@127.0.0.1:<0.29931.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:12] [ns_1@127.0.0.1:<0.29914.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:13] [ns_1@127.0.0.1:<0.29876.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:30:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29931.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:30:13] [ns_1@127.0.0.1:<0.29953.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:14] [ns_1@127.0.0.1:<0.29871.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:14] [ns_1@127.0.0.1:<0.29925.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29939.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29973.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:15] [ns_1@127.0.0.1:<0.29892.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:15] [ns_1@127.0.0.1:<0.29965.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:16] [ns_1@127.0.0.1:<0.29883.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:16] 
[ns_1@127.0.0.1:<0.29945.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:17] [ns_1@127.0.0.1:<0.29907.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:17] [ns_1@127.0.0.1:<0.29980.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:18] [ns_1@127.0.0.1:<0.29896.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:19] [ns_1@127.0.0.1:<0.29958.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:19] [ns_1@127.0.0.1:<0.29918.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:19] [ns_1@127.0.0.1:<0.29999.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:20] [ns_1@127.0.0.1:<0.29991.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29973.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30008.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:20] [ns_1@127.0.0.1:<0.29912.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:20] [ns_1@127.0.0.1:<0.29999.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:21] [ns_1@127.0.0.1:<0.29974.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:21] [ns_1@127.0.0.1:<0.29934.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:21] [ns_1@127.0.0.1:<0.29999.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:22] [ns_1@127.0.0.1:<0.30009.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:22] [ns_1@127.0.0.1:<0.29923.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:22] [ns_1@127.0.0.1:<0.29999.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:23] [ns_1@127.0.0.1:<0.29984.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:23] [ns_1@127.0.0.1:<0.29951.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:30:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29999.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:30:24] [ns_1@127.0.0.1:<0.30022.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:24] [ns_1@127.0.0.1:<0.29943.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30008.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30041.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:25] [ns_1@127.0.0.1:<0.29995.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:25] [ns_1@127.0.0.1:<0.29963.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:26] [ns_1@127.0.0.1:<0.30035.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:26] [ns_1@127.0.0.1:<0.29956.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:27] [ns_1@127.0.0.1:<0.30015.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:27] [ns_1@127.0.0.1:<0.29978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:28] [ns_1@127.0.0.1:<0.30049.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:28] [ns_1@127.0.0.1:<0.29968.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:29] [ns_1@127.0.0.1:<0.30027.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:29] [ns_1@127.0.0.1:<0.29989.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:30:29] [ns_1@127.0.0.1:<0.30068.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30041.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30075.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:30] [ns_1@127.0.0.1:<0.30060.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:30] [ns_1@127.0.0.1:<0.29982.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:30] [ns_1@127.0.0.1:<0.30068.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:31] [ns_1@127.0.0.1:<0.30043.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:31] [ns_1@127.0.0.1:<0.30002.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:31] [ns_1@127.0.0.1:<0.30068.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:32] [ns_1@127.0.0.1:<0.30078.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:32] [ns_1@127.0.0.1:<0.29993.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:32] [ns_1@127.0.0.1:<0.30068.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:33] [ns_1@127.0.0.1:<0.30054.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:33] [ns_1@127.0.0.1:<0.30020.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:30:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30068.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:30:34] [ns_1@127.0.0.1:<0.30090.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:34] [ns_1@127.0.0.1:<0.30013.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30075.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30109.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:35] [ns_1@127.0.0.1:<0.30064.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:35] [ns_1@127.0.0.1:<0.30033.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:36] [ns_1@127.0.0.1:<0.30103.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:36] [ns_1@127.0.0.1:<0.30025.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:37] [ns_1@127.0.0.1:<0.30083.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:37] [ns_1@127.0.0.1:<0.30047.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:38] [ns_1@127.0.0.1:<0.30119.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:38] [ns_1@127.0.0.1:<0.30039.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:39] [ns_1@127.0.0.1:<0.30096.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:39] [ns_1@127.0.0.1:<0.30136.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:39] [ns_1@127.0.0.1:<0.30058.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30109.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.30144.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:40] [ns_1@127.0.0.1:<0.30129.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:40] [ns_1@127.0.0.1:<0.30136.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:40] [ns_1@127.0.0.1:<0.30052.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:41] [ns_1@127.0.0.1:<0.30112.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:41] [ns_1@127.0.0.1:<0.30136.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:41] [ns_1@127.0.0.1:<0.30076.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:42] [ns_1@127.0.0.1:<0.30148.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:42] [ns_1@127.0.0.1:<0.30136.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:42] [ns_1@127.0.0.1:<0.30062.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:43] [ns_1@127.0.0.1:<0.30123.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:30:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30136.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:30:43] [ns_1@127.0.0.1:<0.30088.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:44] [ns_1@127.0.0.1:<0.30161.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:44] [ns_1@127.0.0.1:<0.30081.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30144.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30178.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:45] [ns_1@127.0.0.1:<0.30139.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:45] [ns_1@127.0.0.1:<0.30101.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:46] [ns_1@127.0.0.1:<0.30173.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:46] [ns_1@127.0.0.1:<0.30094.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:47] [ns_1@127.0.0.1:<0.30156.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:47] [ns_1@127.0.0.1:<0.30117.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:48] [ns_1@127.0.0.1:<0.30187.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:48] [ns_1@127.0.0.1:<0.30110.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:49] [ns_1@127.0.0.1:<0.30168.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:49] [ns_1@127.0.0.1:<0.30204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:49] [ns_1@127.0.0.1:<0.30127.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30178.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30213.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:50] [ns_1@127.0.0.1:<0.30198.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:50] [ns_1@127.0.0.1:<0.30204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:50] [ns_1@127.0.0.1:<0.30121.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:51] [ns_1@127.0.0.1:<0.30183.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:51] [ns_1@127.0.0.1:<0.30194.0>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:51] [ns_1@127.0.0.1:<0.30207.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:51] [ns_1@127.0.0.1:<0.30204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:51] [ns_1@127.0.0.1:<0.30145.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:52] [ns_1@127.0.0.1:<0.30218.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:52] [ns_1@127.0.0.1:<0.30204.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:53] [ns_1@127.0.0.1:<0.30132.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:53] [ns_1@127.0.0.1:<0.30229.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:30:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30204.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:30:54] [ns_1@127.0.0.1:<0.30158.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:54] [ns_1@127.0.0.1:<0.30234.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:55] [ns_1@127.0.0.1:<0.30150.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:30:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30213.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:30:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30251.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:30:55] [ns_1@127.0.0.1:<0.30242.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:56] [ns_1@127.0.0.1:<0.30170.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:56] [ns_1@127.0.0.1:<0.30246.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:57] [ns_1@127.0.0.1:<0.30163.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:57] [ns_1@127.0.0.1:<0.30256.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:58] [ns_1@127.0.0.1:<0.30185.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:58] [ns_1@127.0.0.1:<0.30261.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:59] [ns_1@127.0.0.1:<0.30179.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:30:59] [ns_1@127.0.0.1:<0.30267.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:30:59] [ns_1@127.0.0.1:<0.30278.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:30:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750655,74292}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37742576}, {processes,10298808}, {processes_used,8673328}, {system,27443768}, {atom,1306681}, {atom_used,1284164}, {binary,526336}, {code,12859877}, {ets,2052944}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1676}, {memory_data,{4040077312,4013187072,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25888 kB\nBuffers: 57040 kB\nCached: 3533520 kB\nSwapCached: 0 kB\nActive: 305500 kB\nInactive: 3445928 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25888 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 160768 kB\nMapped: 24868 kB\nSlab: 134432 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618324480}, {buffered_memory,58408960}, {free_memory,26509312}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, 
{index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1671811,1}}, {context_switches,{767519,0}}, {garbage_collection,{392060,484984309,0}}, {io,{{input,19394932},{output,25896043}}}, {reductions,{172023835,651057}}, {run_queue,0}, {runtime,{30350,160}}]}]}] [error_logger:error] [2012-03-26 1:31:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30251.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30285.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:00] [ns_1@127.0.0.1:<0.30196.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:00] [ns_1@127.0.0.1:<0.30271.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:00] [ns_1@127.0.0.1:<0.30278.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:01] [ns_1@127.0.0.1:<0.30189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:01] [ns_1@127.0.0.1:<0.30286.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:01] [ns_1@127.0.0.1:<0.30278.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:02] [ns_1@127.0.0.1:<0.30214.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:02] [ns_1@127.0.0.1:<0.30291.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:02] [ns_1@127.0.0.1:<0.30278.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:03] [ns_1@127.0.0.1:<0.30200.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:03] [ns_1@127.0.0.1:<0.30298.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:31:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30278.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 
ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:31:04] [ns_1@127.0.0.1:<0.30231.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:04] [ns_1@127.0.0.1:<0.30304.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30285.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30319.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:05] [ns_1@127.0.0.1:<0.30220.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:05] [ns_1@127.0.0.1:<0.30311.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:06] [ns_1@127.0.0.1:<0.30244.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:06] [ns_1@127.0.0.1:<0.30320.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:07] [ns_1@127.0.0.1:<0.30222.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:07] [ns_1@127.0.0.1:<0.30327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:08] [ns_1@127.0.0.1:<0.30258.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:08] [ns_1@127.0.0.1:<0.30331.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:09] [ns_1@127.0.0.1:<0.30224.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:09] [ns_1@127.0.0.1:<0.30337.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:09] [ns_1@127.0.0.1:<0.30350.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30319.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:10] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30356.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:10] [ns_1@127.0.0.1:<0.30269.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:10] [ns_1@127.0.0.1:<0.30342.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:10] [ns_1@127.0.0.1:<0.30350.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:11] [ns_1@127.0.0.1:<0.30236.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:11] [ns_1@127.0.0.1:<0.30357.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:11] [ns_1@127.0.0.1:<0.30350.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:12] [ns_1@127.0.0.1:<0.30288.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:12] [ns_1@127.0.0.1:<0.30350.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:12] [ns_1@127.0.0.1:<0.30362.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:13] [ns_1@127.0.0.1:<0.30252.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:31:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30350.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:31:13] [ns_1@127.0.0.1:<0.30370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:14] [ns_1@127.0.0.1:<0.30300.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:14] [ns_1@127.0.0.1:<0.30375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30356.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30390.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:15] [ns_1@127.0.0.1:<0.30263.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:15] [ns_1@127.0.0.1:<0.30382.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:16] [ns_1@127.0.0.1:<0.30313.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:16] [ns_1@127.0.0.1:<0.30391.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:17] [ns_1@127.0.0.1:<0.30273.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:17] [ns_1@127.0.0.1:<0.30397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:18] [ns_1@127.0.0.1:<0.30329.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:18] [ns_1@127.0.0.1:<0.30401.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:19] [ns_1@127.0.0.1:<0.30293.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:19] [ns_1@127.0.0.1:<0.30416.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:19] [ns_1@127.0.0.1:<0.30408.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30390.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30425.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:20] [ns_1@127.0.0.1:<0.30339.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:20] [ns_1@127.0.0.1:<0.30416.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:20] [ns_1@127.0.0.1:<0.30412.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:21] 
[ns_1@127.0.0.1:<0.30306.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:21] [ns_1@127.0.0.1:<0.30416.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:21] [ns_1@127.0.0.1:<0.30426.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:22] [ns_1@127.0.0.1:<0.30359.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:22] [ns_1@127.0.0.1:<0.30416.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:22] [ns_1@127.0.0.1:<0.30432.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:23] [ns_1@127.0.0.1:<0.30322.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:31:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30416.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:31:23] [ns_1@127.0.0.1:<0.30439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:24] [ns_1@127.0.0.1:<0.30373.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:24] [ns_1@127.0.0.1:<0.30444.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30425.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30459.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:25] [ns_1@127.0.0.1:<0.30333.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:25] 
[ns_1@127.0.0.1:<0.30452.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:26] [ns_1@127.0.0.1:<0.30384.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:26] [ns_1@127.0.0.1:<0.30460.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:27] [ns_1@127.0.0.1:<0.30344.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:28] [ns_1@127.0.0.1:<0.30466.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:28] [ns_1@127.0.0.1:<0.30399.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:29] [ns_1@127.0.0.1:<0.30471.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:29] [ns_1@127.0.0.1:<0.30364.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:29] [ns_1@127.0.0.1:<0.30485.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:30] [ns_1@127.0.0.1:<0.30477.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30459.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30494.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:30] [ns_1@127.0.0.1:<0.30410.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:30] [ns_1@127.0.0.1:<0.30485.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:31] [ns_1@127.0.0.1:<0.30481.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:31] [ns_1@127.0.0.1:<0.30380.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:31] [ns_1@127.0.0.1:<0.30485.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:32] [ns_1@127.0.0.1:<0.30495.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:32] [ns_1@127.0.0.1:<0.30430.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:32] [ns_1@127.0.0.1:<0.30485.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:33] [ns_1@127.0.0.1:<0.30500.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:33] [ns_1@127.0.0.1:<0.30395.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:31:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30485.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:31:34] [ns_1@127.0.0.1:<0.30507.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:34] [ns_1@127.0.0.1:<0.30442.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30494.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30526.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:35] [ns_1@127.0.0.1:<0.30513.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:35] [ns_1@127.0.0.1:<0.30406.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:36] [ns_1@127.0.0.1:<0.30520.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:36] [ns_1@127.0.0.1:<0.30454.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:37] [ns_1@127.0.0.1:<0.30529.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:37] [ns_1@127.0.0.1:<0.30419.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:38] [ns_1@127.0.0.1:<0.30536.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:38] [ns_1@127.0.0.1:<0.30469.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:31:39] [ns_1@127.0.0.1:<0.30540.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:39] [ns_1@127.0.0.1:<0.30437.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:39] [ns_1@127.0.0.1:<0.30555.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30526.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30561.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:40] [ns_1@127.0.0.1:<0.30546.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:40] [ns_1@127.0.0.1:<0.30479.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:40] [ns_1@127.0.0.1:<0.30555.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:41] [ns_1@127.0.0.1:<0.30551.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:41] [ns_1@127.0.0.1:<0.30450.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:41] [ns_1@127.0.0.1:<0.30555.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:42] [ns_1@127.0.0.1:<0.30564.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:42] [ns_1@127.0.0.1:<0.30498.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:42] [ns_1@127.0.0.1:<0.30555.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:43] [ns_1@127.0.0.1:<0.30569.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:43] [ns_1@127.0.0.1:<0.30464.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:31:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30555.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:31:44] [ns_1@127.0.0.1:<0.30577.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:44] [ns_1@127.0.0.1:<0.30511.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30561.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30593.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:45] [ns_1@127.0.0.1:<0.30582.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:45] [ns_1@127.0.0.1:<0.30475.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:46] [ns_1@127.0.0.1:<0.30589.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:46] [ns_1@127.0.0.1:<0.30527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:47] [ns_1@127.0.0.1:<0.30598.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:47] [ns_1@127.0.0.1:<0.30489.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:48] [ns_1@127.0.0.1:<0.30604.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:48] [ns_1@127.0.0.1:<0.30538.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:49] [ns_1@127.0.0.1:<0.30610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:49] [ns_1@127.0.0.1:<0.30621.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:49] [ns_1@127.0.0.1:<0.30505.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30593.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30627.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:50] [ns_1@127.0.0.1:<0.30615.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:50] [ns_1@127.0.0.1:<0.30621.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:50] [ns_1@127.0.0.1:<0.30549.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:51] [ns_1@127.0.0.1:<0.30624.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:51] [ns_1@127.0.0.1:<0.30621.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:51] [ns_1@127.0.0.1:<0.30518.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:51] [ns_1@127.0.0.1:<0.30534.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:51] [ns_1@127.0.0.1:<0.30544.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:51] [ns_1@127.0.0.1:<0.30562.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:51] [ns_1@127.0.0.1:<0.30575.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:52] [ns_1@127.0.0.1:<0.30635.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:52] [ns_1@127.0.0.1:<0.30621.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:52] [ns_1@127.0.0.1:<0.30567.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:53] [ns_1@127.0.0.1:<0.30642.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:31:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30621.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 
stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:31:53] [ns_1@127.0.0.1:<0.30587.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:54] [ns_1@127.0.0.1:<0.30655.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:54] [ns_1@127.0.0.1:<0.30580.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:31:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30627.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:31:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30670.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:31:55] [ns_1@127.0.0.1:<0.30644.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:55] [ns_1@127.0.0.1:<0.30602.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:56] [ns_1@127.0.0.1:<0.30667.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:56] [ns_1@127.0.0.1:<0.30596.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:57] [ns_1@127.0.0.1:<0.30646.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:57] [ns_1@127.0.0.1:<0.30613.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:58] [ns_1@127.0.0.1:<0.30682.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:58] [ns_1@127.0.0.1:<0.30606.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:31:59] [ns_1@127.0.0.1:<0.30648.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:31:59] [ns_1@127.0.0.1:<0.30699.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:31:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750715,104296}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37814744}, {processes,10385240}, {processes_used,8759760}, {system,27429504}, {atom,1306681}, {atom_used,1284164}, {binary,533240}, {code,12859877}, {ets,2024912}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, 
{os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1736}, {memory_data,{4040077312,4013568000,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25888 kB\nBuffers: 57120 kB\nCached: 3533676 kB\nSwapCached: 0 kB\nActive: 305284 kB\nInactive: 3446076 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25888 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 52 kB\nWriteback: 0 kB\nAnonPages: 160556 kB\nMapped: 24868 kB\nSlab: 134436 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618484224}, {buffered_memory,58490880}, {free_memory,26509312}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1731841,0}}, {context_switches,{780573,0}}, {garbage_collection,{399442,496207270,0}}, {io,{{input,19425539},{output,26338716}}}, {reductions,{174655718,662658}}, {run_queue,0}, {runtime,{30970,150}}]}]}] [stats:error] [2012-03-26 1:31:59] [ns_1@127.0.0.1:<0.30631.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30670.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30706.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:00] [ns_1@127.0.0.1:<0.30692.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:00] [ns_1@127.0.0.1:<0.30699.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:01] [ns_1@127.0.0.1:<0.30617.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:01] [ns_1@127.0.0.1:<0.30650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:01] [ns_1@127.0.0.1:<0.30699.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:02] [ns_1@127.0.0.1:<0.30652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:02] [ns_1@127.0.0.1:<0.30712.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:02] [ns_1@127.0.0.1:<0.30699.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:03] [ns_1@127.0.0.1:<0.30637.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:03] [ns_1@127.0.0.1:<0.30663.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:32:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30699.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:32:04] [ns_1@127.0.0.1:<0.30665.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:04] [ns_1@127.0.0.1:<0.30725.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:05] [ns_1@127.0.0.1:<0.30657.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30706.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30739.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:05] [ns_1@127.0.0.1:<0.30677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:06] [ns_1@127.0.0.1:<0.30679.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:06] [ns_1@127.0.0.1:<0.30736.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:07] [ns_1@127.0.0.1:<0.30673.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
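The repeating {noproc, {gen_server, call, ...}} exits above are the standard shape of a gen_server:call/3 made to a registered name that is not currently running. A minimal shell sketch, assuming only a stock Erlang/OTP shell in which nothing is registered under 'ns_memcached-default' (nothing below is taken from ns_server itself):

%% In an erl shell; catch turns the caller's exit into a printable term.
1> catch gen_server:call('ns_memcached-default', topkeys, 30000).
{'EXIT',{noproc,{gen_server,call,['ns_memcached-default',topkeys,30000]}}}

The reason tuple matches the Reason and "exception exit" fields in the surrounding reports, which is why the same term keeps appearing for both the hot_keys_keeper restarts and the janitor runs.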
[stats:error] [2012-03-26 1:32:07] [ns_1@127.0.0.1:<0.30688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:08] [ns_1@127.0.0.1:<0.30690.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:08] [ns_1@127.0.0.1:<0.30752.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:09] [ns_1@127.0.0.1:<0.30684.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:09] [ns_1@127.0.0.1:<0.30703.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:09] [ns_1@127.0.0.1:<0.30771.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30739.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30775.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:10] [ns_1@127.0.0.1:<0.30709.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:10] [ns_1@127.0.0.1:<0.30763.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:10] [ns_1@127.0.0.1:<0.30771.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:11] [ns_1@127.0.0.1:<0.30694.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:11] [ns_1@127.0.0.1:<0.30719.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:11] [ns_1@127.0.0.1:<0.30771.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:12] [ns_1@127.0.0.1:<0.30721.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:12] [ns_1@127.0.0.1:<0.30783.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:12] [ns_1@127.0.0.1:<0.30771.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:13] [ns_1@127.0.0.1:<0.30714.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:13] [ns_1@127.0.0.1:<0.30732.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 
30000]}} [error_logger:error] [2012-03-26 1:32:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30771.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:32:14] [ns_1@127.0.0.1:<0.30734.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:14] [ns_1@127.0.0.1:<0.30796.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30775.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30809.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:15] [ns_1@127.0.0.1:<0.30727.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:15] [ns_1@127.0.0.1:<0.30748.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:16] [ns_1@127.0.0.1:<0.30750.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:16] [ns_1@127.0.0.1:<0.30812.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:17] [ns_1@127.0.0.1:<0.30743.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:17] [ns_1@127.0.0.1:<0.30758.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:18] [ns_1@127.0.0.1:<0.30760.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:18] [ns_1@127.0.0.1:<0.30822.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:19] [ns_1@127.0.0.1:<0.30754.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:19] [ns_1@127.0.0.1:<0.30778.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:19] [ns_1@127.0.0.1:<0.30839.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30809.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30843.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:20] [ns_1@127.0.0.1:<0.30780.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:20] [ns_1@127.0.0.1:<0.30839.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:20] [ns_1@127.0.0.1:<0.30833.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:21] [ns_1@127.0.0.1:<0.30765.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:21] [ns_1@127.0.0.1:<0.30839.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:21] [ns_1@127.0.0.1:<0.30791.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:22] [ns_1@127.0.0.1:<0.30793.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:22] [ns_1@127.0.0.1:<0.30839.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:22] [ns_1@127.0.0.1:<0.30853.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:23] [ns_1@127.0.0.1:<0.30785.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:32:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30839.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:32:23] [ns_1@127.0.0.1:<0.30803.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
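Each SUPERVISOR REPORT / PROGRESS REPORT pair above is one restart cycle: menelaus_sup sees its hot_keys_keeper child exit with the noproc reason and, because the child is permanent, immediately starts a replacement. A sketch of a child spec in the classic {Id, StartFunc, Restart, Shutdown, Type, Modules} tuple form, using only the values printed in the reports (the trailing module list is the usual OTP convention, not something the log shows):

%% Permanent worker, 5000 ms shutdown, restarted by its supervisor on every exit.
{hot_keys_keeper,
 {hot_keys_keeper, start_link, []},
 permanent, 5000, worker, [hot_keys_keeper]}

Because each replacement child issues the same topkeys call against the same missing process, the cycle repeats (roughly every five seconds in this log) for as long as 'ns_memcached-default' stays unregistered.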
[stats:error] [2012-03-26 1:32:24] [ns_1@127.0.0.1:<0.30805.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:24] [ns_1@127.0.0.1:<0.30865.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30843.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30878.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:25] [ns_1@127.0.0.1:<0.30798.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:25] [ns_1@127.0.0.1:<0.30818.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:26] [ns_1@127.0.0.1:<0.30820.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:26] [ns_1@127.0.0.1:<0.30881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:27] [ns_1@127.0.0.1:<0.30814.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:27] [ns_1@127.0.0.1:<0.30829.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:28] [ns_1@127.0.0.1:<0.30831.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:28] [ns_1@127.0.0.1:<0.30892.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:29] [ns_1@127.0.0.1:<0.30824.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:29] [ns_1@127.0.0.1:<0.30906.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:29] [ns_1@127.0.0.1:<0.30847.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30878.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30913.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 1:32:30] [ns_1@127.0.0.1:<0.30850.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:30] [ns_1@127.0.0.1:<0.30906.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:30] [ns_1@127.0.0.1:<0.30902.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:31] [ns_1@127.0.0.1:<0.30835.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:31] [ns_1@127.0.0.1:<0.30906.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:31] [ns_1@127.0.0.1:<0.30860.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:32] [ns_1@127.0.0.1:<0.30863.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:32] [ns_1@127.0.0.1:<0.30906.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:32] [ns_1@127.0.0.1:<0.30921.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:33] [ns_1@127.0.0.1:<0.30858.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:32:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30906.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:32:33] [ns_1@127.0.0.1:<0.30873.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:34] [ns_1@127.0.0.1:<0.30875.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:34] [ns_1@127.0.0.1:<0.30934.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30913.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:35] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30946.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:35] [ns_1@127.0.0.1:<0.30871.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:36] [ns_1@127.0.0.1:<0.30887.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:36] [ns_1@127.0.0.1:<0.30890.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:37] [ns_1@127.0.0.1:<0.30950.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:37] [ns_1@127.0.0.1:<0.30885.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:38] [ns_1@127.0.0.1:<0.30898.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:38] [ns_1@127.0.0.1:<0.30900.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:39] [ns_1@127.0.0.1:<0.30961.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:39] [ns_1@127.0.0.1:<0.30896.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:39] [ns_1@127.0.0.1:<0.30976.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:40] [ns_1@127.0.0.1:<0.30916.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30946.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30982.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:40] [ns_1@127.0.0.1:<0.30919.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:40] [ns_1@127.0.0.1:<0.30976.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:41] [ns_1@127.0.0.1:<0.30972.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:41] [ns_1@127.0.0.1:<0.30910.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:41] [ns_1@127.0.0.1:<0.30976.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:42] 
[ns_1@127.0.0.1:<0.30928.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:42] [ns_1@127.0.0.1:<0.30932.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:42] [ns_1@127.0.0.1:<0.30976.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:43] [ns_1@127.0.0.1:<0.30990.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:43] [ns_1@127.0.0.1:<0.30926.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:32:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30976.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:32:44] [ns_1@127.0.0.1:<0.30941.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:44] [ns_1@127.0.0.1:<0.30943.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30982.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31014.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:45] [ns_1@127.0.0.1:<0.31003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:45] [ns_1@127.0.0.1:<0.30939.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:46] [ns_1@127.0.0.1:<0.30957.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:46] [ns_1@127.0.0.1:<0.30959.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:47] 
[ns_1@127.0.0.1:<0.31019.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:47] [ns_1@127.0.0.1:<0.30955.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:48] [ns_1@127.0.0.1:<0.30967.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:48] [ns_1@127.0.0.1:<0.30970.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:49] [ns_1@127.0.0.1:<0.31029.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:49] [ns_1@127.0.0.1:<0.30965.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:49] [ns_1@127.0.0.1:<0.31044.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:32:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31014.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:32:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31048.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:32:50] [ns_1@127.0.0.1:<0.30985.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:50] [ns_1@127.0.0.1:<0.30988.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:50] [ns_1@127.0.0.1:<0.31044.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:51] [ns_1@127.0.0.1:<0.31040.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:51] [ns_1@127.0.0.1:<0.30979.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:51] [ns_1@127.0.0.1:<0.31044.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:52] [ns_1@127.0.0.1:<0.30998.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:52] [ns_1@127.0.0.1:<0.31036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:52] [ns_1@127.0.0.1:<0.31054.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:52] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 1:32:52] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:32:52] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing 
isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:32:52] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:32:52] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:32:52] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [menelaus:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.31001.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [ns_server:warn] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.31023.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.31102.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [stats:error] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.31034.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:32:59: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", 
moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 1:32:57] [ns_1@127.0.0.1:<0.31052.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:58] [ns_1@127.0.0.1:<0.31102.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:58] [ns_1@127.0.0.1:<0.31017.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:58] [ns_1@127.0.0.1:<0.31027.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:59] [ns_1@127.0.0.1:<0.31102.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:32:59] [ns_1@127.0.0.1:<0.31114.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:32:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31102.0>} [ns_doctor:info] [2012-03-26 1:32:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750775,129240}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37708128}, {processes,10242616}, {processes_used,8620056}, {system,27465512}, {atom,1306681}, {atom_used,1284164}, {binary,532896}, {code,12859877}, {ets,2054016}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1796}, {memory_data,{4040077312,4013568000,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25640 kB\nBuffers: 57208 kB\nCached: 3533848 kB\nSwapCached: 0 kB\nActive: 305400 kB\nInactive: 3446204 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25640 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160560 kB\nMapped: 24868 kB\nSlab: 134432 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 
0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618660352}, {buffered_memory,58580992}, {free_memory,26255360}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1791866,0}}, {context_switches,{793667,0}}, {garbage_collection,{406713,507547666,0}}, {io,{{input,19456128},{output,26782670}}}, {reductions,{177282505,630000}}, {run_queue,0}, {runtime,{31550,130}}]}]}] [stats:error] [2012-03-26 1:32:59] [ns_1@127.0.0.1:<0.31065.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31048.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31144.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:33:00] [ns_1@127.0.0.1:<0.31102.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:00] [ns_1@127.0.0.1:<0.31119.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:00] [ns_1@127.0.0.1:<0.31038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:01] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:33:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31102.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:33:01] [ns_1@127.0.0.1:<0.31141.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:01] [ns_1@127.0.0.1:<0.31067.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:02] [ns_1@127.0.0.1:<0.31150.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
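Every crash report in this section reduces to the same precondition: the name 'ns_memcached-default' is not registered on the node while callers keep addressing it. An illustrative guard as a hypothetical helper module (this is not how ns_server handles it; ns_janitor first waits, as the wait_for_memcached lines show, and then fails the run):

%% call_guard.erl -- hypothetical helper; checks the process registry before calling.
-module(call_guard).
-export([safe_call/3]).

safe_call(Name, Request, Timeout) ->
    case whereis(Name) of
        undefined -> {error, noproc};                     %% name not registered yet
        _Pid      -> gen_server:call(Name, Request, Timeout)
    end.

A small race remains between whereis/1 and the call, so a real caller would still catch the exit or, as in these reports, rely on its supervisor to restart it.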
[stats:error] [2012-03-26 1:33:02] [ns_1@127.0.0.1:<0.31058.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:03] [ns_1@127.0.0.1:<0.31157.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:03] [ns_1@127.0.0.1:<0.31070.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:04] [ns_1@127.0.0.1:<0.31162.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:04] [ns_1@127.0.0.1:<0.31095.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31144.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31175.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:05] [ns_1@127.0.0.1:<0.31168.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:05] [ns_1@127.0.0.1:<0.31073.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:06] [ns_1@127.0.0.1:<0.31172.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:06] [ns_1@127.0.0.1:<0.31096.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:07] [ns_1@127.0.0.1:<0.31184.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:07] [ns_1@127.0.0.1:<0.31075.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:08] [ns_1@127.0.0.1:<0.31188.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:08] [ns_1@127.0.0.1:<0.31097.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:09] [ns_1@127.0.0.1:<0.31194.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:09] [ns_1@127.0.0.1:<0.31207.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:09] [ns_1@127.0.0.1:<0.31077.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31175.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31213.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:10] [ns_1@127.0.0.1:<0.31199.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:10] [ns_1@127.0.0.1:<0.31207.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:11] [ns_1@127.0.0.1:<0.31098.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:11] [ns_1@127.0.0.1:<0.31210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:11] [ns_1@127.0.0.1:<0.31207.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:12] [ns_1@127.0.0.1:<0.31117.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:12] [ns_1@127.0.0.1:<0.31219.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:12] [ns_1@127.0.0.1:<0.31207.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:13] [ns_1@127.0.0.1:<0.31107.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:13] [ns_1@127.0.0.1:<0.31227.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:33:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31207.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:33:14] [ns_1@127.0.0.1:<0.31148.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:14] [ns_1@127.0.0.1:<0.31232.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:15] [ns_1@127.0.0.1:<0.31122.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:15] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31213.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31247.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:15] [ns_1@127.0.0.1:<0.31239.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:16] [ns_1@127.0.0.1:<0.31159.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:16] [ns_1@127.0.0.1:<0.31244.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:17] [ns_1@127.0.0.1:<0.31153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:17] [ns_1@127.0.0.1:<0.31254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:18] [ns_1@127.0.0.1:<0.31170.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:18] [ns_1@127.0.0.1:<0.31258.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:19] [ns_1@127.0.0.1:<0.31164.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:19] [ns_1@127.0.0.1:<0.31265.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:19] [ns_1@127.0.0.1:<0.31275.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31247.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31279.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:20] [ns_1@127.0.0.1:<0.31186.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:20] [ns_1@127.0.0.1:<0.31269.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:20] [ns_1@127.0.0.1:<0.31275.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:33:21] [ns_1@127.0.0.1:<0.31179.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:21] [ns_1@127.0.0.1:<0.31283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:21] [ns_1@127.0.0.1:<0.31275.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:22] [ns_1@127.0.0.1:<0.31196.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:22] [ns_1@127.0.0.1:<0.31289.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:22] [ns_1@127.0.0.1:<0.31275.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:23] [ns_1@127.0.0.1:<0.31190.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:23] [ns_1@127.0.0.1:<0.31296.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:33:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31275.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:33:24] [ns_1@127.0.0.1:<0.31216.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:24] [ns_1@127.0.0.1:<0.31301.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31279.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31314.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:25] [ns_1@127.0.0.1:<0.31201.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:25] 
[ns_1@127.0.0.1:<0.31309.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:26] [ns_1@127.0.0.1:<0.31229.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:26] [ns_1@127.0.0.1:<0.31317.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:27] [ns_1@127.0.0.1:<0.31221.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:27] [ns_1@127.0.0.1:<0.31323.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:28] [ns_1@127.0.0.1:<0.31241.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:28] [ns_1@127.0.0.1:<0.31328.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:29] [ns_1@127.0.0.1:<0.31234.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:29] [ns_1@127.0.0.1:<0.31342.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:29] [ns_1@127.0.0.1:<0.31334.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31314.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31349.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:30] [ns_1@127.0.0.1:<0.31256.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:30] [ns_1@127.0.0.1:<0.31342.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:30] [ns_1@127.0.0.1:<0.31338.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:31] [ns_1@127.0.0.1:<0.31250.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:31] [ns_1@127.0.0.1:<0.31342.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:31] [ns_1@127.0.0.1:<0.31352.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:32] [ns_1@127.0.0.1:<0.31267.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:32] [ns_1@127.0.0.1:<0.31342.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:32] [ns_1@127.0.0.1:<0.31357.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:33] [ns_1@127.0.0.1:<0.31260.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:33:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31342.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:33:33] [ns_1@127.0.0.1:<0.31364.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:34] [ns_1@127.0.0.1:<0.31285.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:34] [ns_1@127.0.0.1:<0.31370.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31349.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31382.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:35] [ns_1@127.0.0.1:<0.31271.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:35] [ns_1@127.0.0.1:<0.31377.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:36] [ns_1@127.0.0.1:<0.31298.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:36] [ns_1@127.0.0.1:<0.31386.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:37] [ns_1@127.0.0.1:<0.31291.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:37] [ns_1@127.0.0.1:<0.31393.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:38] [ns_1@127.0.0.1:<0.31311.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:33:38] [ns_1@127.0.0.1:<0.31397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:39] [ns_1@127.0.0.1:<0.31303.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:39] [ns_1@127.0.0.1:<0.31412.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:39] [ns_1@127.0.0.1:<0.31403.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31382.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31418.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:40] [ns_1@127.0.0.1:<0.31325.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:40] [ns_1@127.0.0.1:<0.31412.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:40] [ns_1@127.0.0.1:<0.31408.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:41] [ns_1@127.0.0.1:<0.31319.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:41] [ns_1@127.0.0.1:<0.31412.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:41] [ns_1@127.0.0.1:<0.31421.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:42] [ns_1@127.0.0.1:<0.31336.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:42] [ns_1@127.0.0.1:<0.31412.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:42] [ns_1@127.0.0.1:<0.31426.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:43] [ns_1@127.0.0.1:<0.31330.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:33:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31412.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:33:44] [ns_1@127.0.0.1:<0.31434.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:44] [ns_1@127.0.0.1:<0.31355.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:45] [ns_1@127.0.0.1:<0.31439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31418.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31452.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:45] [ns_1@127.0.0.1:<0.31345.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:46] [ns_1@127.0.0.1:<0.31446.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:46] [ns_1@127.0.0.1:<0.31368.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:47] [ns_1@127.0.0.1:<0.31455.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:47] [ns_1@127.0.0.1:<0.31362.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:48] [ns_1@127.0.0.1:<0.31461.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:48] [ns_1@127.0.0.1:<0.31379.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:49] [ns_1@127.0.0.1:<0.31465.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:49] [ns_1@127.0.0.1:<0.31375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:49] [ns_1@127.0.0.1:<0.31480.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:50] [ns_1@127.0.0.1:<0.31472.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31452.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31486.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:50] [ns_1@127.0.0.1:<0.31395.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:50] [ns_1@127.0.0.1:<0.31480.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:51] [ns_1@127.0.0.1:<0.31476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:51] [ns_1@127.0.0.1:<0.31391.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:51] [ns_1@127.0.0.1:<0.31480.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:52] [ns_1@127.0.0.1:<0.31490.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:52] [ns_1@127.0.0.1:<0.31406.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:52] [ns_1@127.0.0.1:<0.31480.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:53] [ns_1@127.0.0.1:<0.31496.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:53] [ns_1@127.0.0.1:<0.31401.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:33:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31480.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:33:54] [ns_1@127.0.0.1:<0.31503.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:54] [ns_1@127.0.0.1:<0.31424.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:33:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31486.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:33:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31519.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:33:55] [ns_1@127.0.0.1:<0.31508.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:55] [ns_1@127.0.0.1:<0.31415.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:56] [ns_1@127.0.0.1:<0.31516.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:56] [ns_1@127.0.0.1:<0.31437.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:57] [ns_1@127.0.0.1:<0.31524.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:33:57] [ns_1@127.0.0.1:<0.31432.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:33:59] [ns_1@127.0.0.1:<0.31542.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:33:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750835,153248}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37863000}, {processes,10340608}, {processes_used,8715128}, {system,27522392}, {atom,1306681}, {atom_used,1284164}, {binary,551616}, {code,12859877}, {ets,2086224}]}, {system_stats, [{cpu_utilization_rate,25.81453634085213}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1856}, {memory_data,{4040077312,4013948928,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25268 kB\nBuffers: 57380 kB\nCached: 3533580 kB\nSwapCached: 0 kB\nActive: 305452 kB\nInactive: 3446140 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25268 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 16 kB\nWriteback: 0 kB\nAnonPages: 160624 kB\nMapped: 24868 kB\nSlab: 134412 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580280 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 
kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618385920}, {buffered_memory,58757120}, {free_memory,25874432}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1851891,0}}, {context_switches,{807311,0}}, {garbage_collection,{413994,519363085,0}}, {io,{{input,19738193},{output,27543036}}}, {reductions,{180035738,640671}}, {run_queue,0}, {runtime,{32160,150}}]}]}] [error_logger:error] [2012-03-26 1:34:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31519.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31547.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:34:00] [ns_1@127.0.0.1:<0.31542.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:01] [ns_1@127.0.0.1:<0.31542.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:02] [ns_1@127.0.0.1:<0.31542.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:34:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31542.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:34:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31547.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] 
[2012-03-26 1:34:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31560.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:07] [ns_1@127.0.0.1:<0.31444.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:08] [ns_1@127.0.0.1:<0.31530.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:08] [ns_1@127.0.0.1:<0.31449.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:09] [ns_1@127.0.0.1:<0.31535.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:09] [ns_1@127.0.0.1:<0.31459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:09] [ns_1@127.0.0.1:<0.31584.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31560.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31588.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:10] [ns_1@127.0.0.1:<0.31573.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:10] [ns_1@127.0.0.1:<0.31463.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:10] [ns_1@127.0.0.1:<0.31584.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:11] [ns_1@127.0.0.1:<0.31567.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:11] [ns_1@127.0.0.1:<0.31584.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:12] [ns_1@127.0.0.1:<0.31584.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 1:34:11] [ns_1@127.0.0.1:<0.31470.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: 
<0.31584.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:34:14] [ns_1@127.0.0.1:<0.31593.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:14] [ns_1@127.0.0.1:<0.31474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31588.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31614.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:15] [ns_1@127.0.0.1:<0.31578.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:15] [ns_1@127.0.0.1:<0.31483.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:16] [ns_1@127.0.0.1:<0.31611.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:16] [ns_1@127.0.0.1:<0.31494.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:17] [ns_1@127.0.0.1:<0.31600.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:17] [ns_1@127.0.0.1:<0.31501.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:18] [ns_1@127.0.0.1:<0.31625.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:18] [ns_1@127.0.0.1:<0.31506.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:19] [ns_1@127.0.0.1:<0.31621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:19] [ns_1@127.0.0.1:<0.31642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:19] [ns_1@127.0.0.1:<0.31514.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} 
Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31614.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31648.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:20] [ns_1@127.0.0.1:<0.31636.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:20] [ns_1@127.0.0.1:<0.31642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:20] [ns_1@127.0.0.1:<0.31522.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:21] [ns_1@127.0.0.1:<0.31632.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:21] [ns_1@127.0.0.1:<0.31642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:21] [ns_1@127.0.0.1:<0.31528.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:22] [ns_1@127.0.0.1:<0.31656.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:22] [ns_1@127.0.0.1:<0.31642.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:22] [ns_1@127.0.0.1:<0.31533.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:23] [ns_1@127.0.0.1:<0.31645.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:34:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31642.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:34:23] [ns_1@127.0.0.1:<0.31571.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:24] [ns_1@127.0.0.1:<0.31668.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:24] 
[ns_1@127.0.0.1:<0.31576.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31648.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31683.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:25] [ns_1@127.0.0.1:<0.31663.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:26] [ns_1@127.0.0.1:<0.31591.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:26] [ns_1@127.0.0.1:<0.31680.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:27] [ns_1@127.0.0.1:<0.31596.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:27] [ns_1@127.0.0.1:<0.31676.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:28] [ns_1@127.0.0.1:<0.31608.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:28] [ns_1@127.0.0.1:<0.31695.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:29] [ns_1@127.0.0.1:<0.31617.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:29] [ns_1@127.0.0.1:<0.31690.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:29] [ns_1@127.0.0.1:<0.31711.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:30] [ns_1@127.0.0.1:<0.31623.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31683.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31718.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:30] [ns_1@127.0.0.1:<0.31705.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 1:34:30] [ns_1@127.0.0.1:<0.31711.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:31] [ns_1@127.0.0.1:<0.31627.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:31] [ns_1@127.0.0.1:<0.31701.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:31] [ns_1@127.0.0.1:<0.31711.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:32] [ns_1@127.0.0.1:<0.31634.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:32] [ns_1@127.0.0.1:<0.31724.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:32] [ns_1@127.0.0.1:<0.31711.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:33] [ns_1@127.0.0.1:<0.31638.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:33] [ns_1@127.0.0.1:<0.31715.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:34:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31711.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:34:34] [ns_1@127.0.0.1:<0.31652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:34] [ns_1@127.0.0.1:<0.31737.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31718.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31749.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:35] 
[ns_1@127.0.0.1:<0.31658.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:35] [ns_1@127.0.0.1:<0.31731.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:36] [ns_1@127.0.0.1:<0.31665.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:36] [ns_1@127.0.0.1:<0.31750.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:37] [ns_1@127.0.0.1:<0.31670.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:37] [ns_1@127.0.0.1:<0.31744.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:38] [ns_1@127.0.0.1:<0.31678.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:38] [ns_1@127.0.0.1:<0.31764.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:39] [ns_1@127.0.0.1:<0.31686.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:39] [ns_1@127.0.0.1:<0.31760.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:39] [ns_1@127.0.0.1:<0.31781.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31749.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31785.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:40] [ns_1@127.0.0.1:<0.31692.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:40] [ns_1@127.0.0.1:<0.31775.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:40] [ns_1@127.0.0.1:<0.31781.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:41] [ns_1@127.0.0.1:<0.31697.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:41] [ns_1@127.0.0.1:<0.31770.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:41] [ns_1@127.0.0.1:<0.31781.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:42] [ns_1@127.0.0.1:<0.31703.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:42] [ns_1@127.0.0.1:<0.31793.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:42] [ns_1@127.0.0.1:<0.31781.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:43] [ns_1@127.0.0.1:<0.31707.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:43] [ns_1@127.0.0.1:<0.31788.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:34:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31781.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:34:44] [ns_1@127.0.0.1:<0.31721.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:44] [ns_1@127.0.0.1:<0.31806.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31785.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31819.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:45] [ns_1@127.0.0.1:<0.31726.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:45] [ns_1@127.0.0.1:<0.31801.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:46] [ns_1@127.0.0.1:<0.31733.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:46] [ns_1@127.0.0.1:<0.31822.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:47] [ns_1@127.0.0.1:<0.31739.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:47] [ns_1@127.0.0.1:<0.31813.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:34:48] [ns_1@127.0.0.1:<0.31746.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:48] [ns_1@127.0.0.1:<0.31832.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:49] [ns_1@127.0.0.1:<0.31755.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:49] [ns_1@127.0.0.1:<0.31847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:49] [ns_1@127.0.0.1:<0.31828.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31819.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31853.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:50] [ns_1@127.0.0.1:<0.31762.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:50] [ns_1@127.0.0.1:<0.31847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:50] [ns_1@127.0.0.1:<0.31843.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:51] [ns_1@127.0.0.1:<0.31766.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:51] [ns_1@127.0.0.1:<0.31847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:51] [ns_1@127.0.0.1:<0.31839.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:52] [ns_1@127.0.0.1:<0.31772.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:52] [ns_1@127.0.0.1:<0.31847.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:52] [ns_1@127.0.0.1:<0.31863.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:53] [ns_1@127.0.0.1:<0.31777.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:34:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31847.0> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:34:53] [ns_1@127.0.0.1:<0.31857.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:54] [ns_1@127.0.0.1:<0.31790.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:54] [ns_1@127.0.0.1:<0.31875.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:34:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31853.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:34:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31888.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:34:55] [ns_1@127.0.0.1:<0.31795.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:55] [ns_1@127.0.0.1:<0.31870.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:56] [ns_1@127.0.0.1:<0.31803.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:56] [ns_1@127.0.0.1:<0.31891.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:57] [ns_1@127.0.0.1:<0.31808.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:57] [ns_1@127.0.0.1:<0.31883.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:58] [ns_1@127.0.0.1:<0.31815.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:58] [ns_1@127.0.0.1:<0.31902.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:34:59] [ns_1@127.0.0.1:<0.31826.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:34:59] [ns_1@127.0.0.1:<0.31917.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:34:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750895,181330}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, 
{incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37827960}, {processes,10261632}, {processes_used,8636152}, {system,27566328}, {atom,1306681}, {atom_used,1284164}, {binary,560504}, {code,12859877}, {ets,2114632}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1916}, {memory_data,{4040077312,4014202880,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25268 kB\nBuffers: 57476 kB\nCached: 3533736 kB\nSwapCached: 0 kB\nActive: 305612 kB\nInactive: 3446244 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25268 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 16 kB\nWriteback: 0 kB\nAnonPages: 160660 kB\nMapped: 24868 kB\nSlab: 134408 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582560 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618545664}, {buffered_memory,58855424}, {free_memory,25874432}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1911919,1}}, {context_switches,{819197,0}}, {garbage_collection,{420609,528844616,0}}, {io,{{input,19768737},{output,27938225}}}, {reductions,{182328779,630636}}, {run_queue,0}, {runtime,{32690,150}}]}]}] [stats:error] [2012-03-26 1:34:59] [ns_1@127.0.0.1:<0.31897.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31888.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31924.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:00] [ns_1@127.0.0.1:<0.31830.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:00] [ns_1@127.0.0.1:<0.31917.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:01] 
[ns_1@127.0.0.1:<0.31912.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:01] [ns_1@127.0.0.1:<0.31837.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:01] [ns_1@127.0.0.1:<0.31917.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:02] [ns_1@127.0.0.1:<0.31908.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:02] [ns_1@127.0.0.1:<0.31841.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:02] [ns_1@127.0.0.1:<0.31917.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:03] [ns_1@127.0.0.1:<0.31932.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:03] [ns_1@127.0.0.1:<0.31850.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:35:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31917.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:35:04] [ns_1@127.0.0.1:<0.31927.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:04] [ns_1@127.0.0.1:<0.31861.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:05] [ns_1@127.0.0.1:<0.31945.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31924.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31957.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:05] 
[ns_1@127.0.0.1:<0.31868.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:06] [ns_1@127.0.0.1:<0.31939.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:06] [ns_1@127.0.0.1:<0.31873.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:07] [ns_1@127.0.0.1:<0.31961.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:07] [ns_1@127.0.0.1:<0.31881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:08] [ns_1@127.0.0.1:<0.31952.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:08] [ns_1@127.0.0.1:<0.31968.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:08] [ns_1@127.0.0.1:<0.31885.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:08] [ns_1@127.0.0.1:<0.31900.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:09] [ns_1@127.0.0.1:<0.31972.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:09] [ns_1@127.0.0.1:<0.31895.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:09] [ns_1@127.0.0.1:<0.31993.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31957.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31997.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:10] [ns_1@127.0.0.1:<0.31982.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:10] [ns_1@127.0.0.1:<0.31910.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:10] [ns_1@127.0.0.1:<0.31993.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:11] [ns_1@127.0.0.1:<0.31987.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:11] [ns_1@127.0.0.1:<0.31906.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:11] [ns_1@127.0.0.1:<0.31993.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:12] [ns_1@127.0.0.1:<0.32002.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:12] [ns_1@127.0.0.1:<0.31930.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:12] [ns_1@127.0.0.1:<0.31993.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:13] [ns_1@127.0.0.1:<0.32007.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:13] [ns_1@127.0.0.1:<0.31921.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:35:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31993.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:35:14] [ns_1@127.0.0.1:<0.32015.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:14] [ns_1@127.0.0.1:<0.31943.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31997.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32031.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:15] [ns_1@127.0.0.1:<0.32020.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:15] [ns_1@127.0.0.1:<0.31937.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:16] [ns_1@127.0.0.1:<0.32027.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:16] [ns_1@127.0.0.1:<0.31954.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:17] [ns_1@127.0.0.1:<0.32036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:35:17] [ns_1@127.0.0.1:<0.31950.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:18] [ns_1@127.0.0.1:<0.32042.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:18] [ns_1@127.0.0.1:<0.31970.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:19] [ns_1@127.0.0.1:<0.32046.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:19] [ns_1@127.0.0.1:<0.32060.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:19] [ns_1@127.0.0.1:<0.31966.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32031.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32065.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:20] [ns_1@127.0.0.1:<0.32053.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:20] [ns_1@127.0.0.1:<0.32060.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:20] [ns_1@127.0.0.1:<0.31985.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:21] [ns_1@127.0.0.1:<0.32059.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:21] [ns_1@127.0.0.1:<0.32060.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:21] [ns_1@127.0.0.1:<0.31976.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:22] [ns_1@127.0.0.1:<0.32072.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:22] [ns_1@127.0.0.1:<0.32060.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:22] [ns_1@127.0.0.1:<0.32005.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:23] [ns_1@127.0.0.1:<0.32080.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:35:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32060.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:35:23] [ns_1@127.0.0.1:<0.31978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:24] [ns_1@127.0.0.1:<0.32085.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:24] [ns_1@127.0.0.1:<0.32018.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32065.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32100.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:25] [ns_1@127.0.0.1:<0.32093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:25] [ns_1@127.0.0.1:<0.31980.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:26] [ns_1@127.0.0.1:<0.32097.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:26] [ns_1@127.0.0.1:<0.32034.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:27] [ns_1@127.0.0.1:<0.32107.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:27] [ns_1@127.0.0.1:<0.31998.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:28] [ns_1@127.0.0.1:<0.32112.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:28] [ns_1@127.0.0.1:<0.32044.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:29] [ns_1@127.0.0.1:<0.32118.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:29] [ns_1@127.0.0.1:<0.32128.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:29] [ns_1@127.0.0.1:<0.32013.0>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32100.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32135.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:30] [ns_1@127.0.0.1:<0.32122.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:30] [ns_1@127.0.0.1:<0.32128.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:30] [ns_1@127.0.0.1:<0.32055.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:31] [ns_1@127.0.0.1:<0.32132.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:31] [ns_1@127.0.0.1:<0.32128.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:31] [ns_1@127.0.0.1:<0.32025.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:32] [ns_1@127.0.0.1:<0.32141.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:32] [ns_1@127.0.0.1:<0.32128.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:32] [ns_1@127.0.0.1:<0.32075.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:33] [ns_1@127.0.0.1:<0.32148.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:35:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32128.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:35:33] [ns_1@127.0.0.1:<0.32040.0>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:34] [ns_1@127.0.0.1:<0.32154.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:35] [ns_1@127.0.0.1:<0.32087.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32135.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32168.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:35] [ns_1@127.0.0.1:<0.32161.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:36] [ns_1@127.0.0.1:<0.32051.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:36] [ns_1@127.0.0.1:<0.32165.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:37] [ns_1@127.0.0.1:<0.32103.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:37] [ns_1@127.0.0.1:<0.32177.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:38] [ns_1@127.0.0.1:<0.32069.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:38] [ns_1@127.0.0.1:<0.32181.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:39] [ns_1@127.0.0.1:<0.32114.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:39] [ns_1@127.0.0.1:<0.32187.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:39] [ns_1@127.0.0.1:<0.32198.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:40] [ns_1@127.0.0.1:<0.32082.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32168.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32204.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:40] [ns_1@127.0.0.1:<0.32192.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:40] [ns_1@127.0.0.1:<0.32198.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:41] [ns_1@127.0.0.1:<0.32124.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:41] [ns_1@127.0.0.1:<0.32201.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:41] [ns_1@127.0.0.1:<0.32198.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:42] [ns_1@127.0.0.1:<0.32095.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:42] [ns_1@127.0.0.1:<0.32210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:42] [ns_1@127.0.0.1:<0.32198.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:43] [ns_1@127.0.0.1:<0.32143.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:43] [ns_1@127.0.0.1:<0.32218.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:35:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32198.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:35:44] [ns_1@127.0.0.1:<0.32109.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:44] [ns_1@127.0.0.1:<0.32223.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32204.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,menelaus_sup} started: [{pid,<0.32236.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:45] [ns_1@127.0.0.1:<0.32156.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:45] [ns_1@127.0.0.1:<0.32230.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:46] [ns_1@127.0.0.1:<0.32120.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:46] [ns_1@127.0.0.1:<0.32237.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:47] [ns_1@127.0.0.1:<0.32172.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:47] [ns_1@127.0.0.1:<0.32245.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:48] [ns_1@127.0.0.1:<0.32138.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:48] [ns_1@127.0.0.1:<0.32249.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:49] [ns_1@127.0.0.1:<0.32183.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:49] [ns_1@127.0.0.1:<0.32256.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:49] [ns_1@127.0.0.1:<0.32266.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32236.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32270.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:50] [ns_1@127.0.0.1:<0.32150.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:50] [ns_1@127.0.0.1:<0.32260.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:50] [ns_1@127.0.0.1:<0.32266.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:51] [ns_1@127.0.0.1:<0.32194.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:51] [ns_1@127.0.0.1:<0.32274.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:51] [ns_1@127.0.0.1:<0.32266.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:52] 
[ns_1@127.0.0.1:<0.32163.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:52] [ns_1@127.0.0.1:<0.32280.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:52] [ns_1@127.0.0.1:<0.32266.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:53] [ns_1@127.0.0.1:<0.32212.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:53] [ns_1@127.0.0.1:<0.32287.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:35:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32266.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:35:54] [ns_1@127.0.0.1:<0.32179.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:54] [ns_1@127.0.0.1:<0.32292.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:35:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32270.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:35:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32305.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:35:55] [ns_1@127.0.0.1:<0.32225.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:55] [ns_1@127.0.0.1:<0.32300.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:56] [ns_1@127.0.0.1:<0.32189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:56] [ns_1@127.0.0.1:<0.32308.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:57] 
[ns_1@127.0.0.1:<0.32241.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:57] [ns_1@127.0.0.1:<0.32314.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:58] [ns_1@127.0.0.1:<0.32207.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:58] [ns_1@127.0.0.1:<0.32319.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:35:59] [ns_1@127.0.0.1:<0.32251.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:35:59] [ns_1@127.0.0.1:<0.32349.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:35:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,750955,210300}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37873176}, {processes,10330384}, {processes_used,8704904}, {system,27542792}, {atom,1306681}, {atom_used,1284164}, {binary,559256}, {code,12859877}, {ets,2085368}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,1976}, {memory_data,{4040077312,4014202880,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25144 kB\nBuffers: 57572 kB\nCached: 3533888 kB\nSwapCached: 0 kB\nActive: 305760 kB\nInactive: 3446352 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25144 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 4 kB\nWriteback: 0 kB\nAnonPages: 160676 kB\nMapped: 24868 kB\nSlab: 134404 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582560 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618701312}, {buffered_memory,58953728}, {free_memory,25747456}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{1971947,0}}, {context_switches,{832192,0}}, {garbage_collection,{427757,540016959,0}}, {io,{{input,19799344},{output,28380533}}}, {reductions,{184919668,637167}}, {run_queue,0}, {runtime,{33270,160}}]}]}] [stats:error] [2012-03-26 1:35:59] [ns_1@127.0.0.1:<0.32325.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:00] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32305.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32356.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:00] [ns_1@127.0.0.1:<0.32220.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:00] [ns_1@127.0.0.1:<0.32349.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:00] [ns_1@127.0.0.1:<0.32329.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:01] [ns_1@127.0.0.1:<0.32262.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:01] [ns_1@127.0.0.1:<0.32349.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:01] [ns_1@127.0.0.1:<0.32359.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:02] [ns_1@127.0.0.1:<0.32232.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:02] [ns_1@127.0.0.1:<0.32349.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:02] [ns_1@127.0.0.1:<0.32364.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:03] [ns_1@127.0.0.1:<0.32282.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:36:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32349.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:36:03] [ns_1@127.0.0.1:<0.32371.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:04] 
[ns_1@127.0.0.1:<0.32247.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:04] [ns_1@127.0.0.1:<0.32377.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32356.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32389.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:05] [ns_1@127.0.0.1:<0.32296.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:05] [ns_1@127.0.0.1:<0.32384.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:06] [ns_1@127.0.0.1:<0.32258.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:06] [ns_1@127.0.0.1:<0.32393.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:07] [ns_1@127.0.0.1:<0.32312.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:07] [ns_1@127.0.0.1:<0.32400.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:08] [ns_1@127.0.0.1:<0.32276.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:08] [ns_1@127.0.0.1:<0.32289.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:08] [ns_1@127.0.0.1:<0.32302.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:09] [ns_1@127.0.0.1:<0.32404.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:09] [ns_1@127.0.0.1:<0.32323.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:09] [ns_1@127.0.0.1:<0.32425.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:10] [ns_1@127.0.0.1:<0.32410.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32389.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:10] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32431.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:10] [ns_1@127.0.0.1:<0.32317.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:10] [ns_1@127.0.0.1:<0.32425.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:11] [ns_1@127.0.0.1:<0.32419.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:11] [ns_1@127.0.0.1:<0.32353.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:11] [ns_1@127.0.0.1:<0.32425.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:12] [ns_1@127.0.0.1:<0.32412.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:12] [ns_1@127.0.0.1:<0.32327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:12] [ns_1@127.0.0.1:<0.32425.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:13] [ns_1@127.0.0.1:<0.32439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:13] [ns_1@127.0.0.1:<0.32369.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:36:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32425.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:36:14] [ns_1@127.0.0.1:<0.32414.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:14] [ns_1@127.0.0.1:<0.32362.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:15] [ns_1@127.0.0.1:<0.32452.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32431.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32465.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:15] [ns_1@127.0.0.1:<0.32382.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:16] [ns_1@127.0.0.1:<0.32434.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:16] [ns_1@127.0.0.1:<0.32375.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:17] [ns_1@127.0.0.1:<0.32468.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:17] [ns_1@127.0.0.1:<0.32398.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:18] [ns_1@127.0.0.1:<0.32447.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:18] [ns_1@127.0.0.1:<0.32386.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:19] [ns_1@127.0.0.1:<0.32478.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:19] [ns_1@127.0.0.1:<0.32408.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:19] [ns_1@127.0.0.1:<0.32493.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32465.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32497.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:20] [ns_1@127.0.0.1:<0.32459.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:20] [ns_1@127.0.0.1:<0.32402.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:20] [ns_1@127.0.0.1:<0.32493.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:21] [ns_1@127.0.0.1:<0.32489.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:21] 
[ns_1@127.0.0.1:<0.32428.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:21] [ns_1@127.0.0.1:<0.32493.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:22] [ns_1@127.0.0.1:<0.32474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:22] [ns_1@127.0.0.1:<0.32417.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:22] [ns_1@127.0.0.1:<0.32493.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:23] [ns_1@127.0.0.1:<0.32509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:23] [ns_1@127.0.0.1:<0.32445.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:36:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32493.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:36:24] [ns_1@127.0.0.1:<0.32485.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:24] [ns_1@127.0.0.1:<0.32437.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32497.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32532.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:25] [ns_1@127.0.0.1:<0.32521.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:25] [ns_1@127.0.0.1:<0.32457.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:26] 
[ns_1@127.0.0.1:<0.32503.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:26] [ns_1@127.0.0.1:<0.32450.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:27] [ns_1@127.0.0.1:<0.32537.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:27] [ns_1@127.0.0.1:<0.32472.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:28] [ns_1@127.0.0.1:<0.32516.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:28] [ns_1@127.0.0.1:<0.32462.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:29] [ns_1@127.0.0.1:<0.32550.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:29] [ns_1@127.0.0.1:<0.32560.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:29] [ns_1@127.0.0.1:<0.32483.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32532.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32567.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:30] [ns_1@127.0.0.1:<0.32529.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:30] [ns_1@127.0.0.1:<0.32560.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:30] [ns_1@127.0.0.1:<0.32476.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:31] [ns_1@127.0.0.1:<0.32563.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:31] [ns_1@127.0.0.1:<0.32560.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:31] [ns_1@127.0.0.1:<0.32498.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:32] [ns_1@127.0.0.1:<0.32543.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:32] [ns_1@127.0.0.1:<0.32560.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:32] [ns_1@127.0.0.1:<0.32487.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:33] [ns_1@127.0.0.1:<0.32580.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:36:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32560.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:36:33] [ns_1@127.0.0.1:<0.32514.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:34] [ns_1@127.0.0.1:<0.32554.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:34] [ns_1@127.0.0.1:<0.32507.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32567.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32600.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:35] [ns_1@127.0.0.1:<0.32593.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:35] [ns_1@127.0.0.1:<0.32527.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:36] [ns_1@127.0.0.1:<0.32573.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:36] [ns_1@127.0.0.1:<0.32519.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:37] [ns_1@127.0.0.1:<0.32609.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:37] [ns_1@127.0.0.1:<0.32541.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:38] [ns_1@127.0.0.1:<0.32586.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:38] [ns_1@127.0.0.1:<0.32535.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:36:39] [ns_1@127.0.0.1:<0.32619.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:39] [ns_1@127.0.0.1:<0.32630.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:39] [ns_1@127.0.0.1:<0.32552.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32600.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32636.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:40] [ns_1@127.0.0.1:<0.32597.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:40] [ns_1@127.0.0.1:<0.32630.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:40] [ns_1@127.0.0.1:<0.32546.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:41] [ns_1@127.0.0.1:<0.32633.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:41] [ns_1@127.0.0.1:<0.32630.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:41] [ns_1@127.0.0.1:<0.32570.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:42] [ns_1@127.0.0.1:<0.32613.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:42] [ns_1@127.0.0.1:<0.32630.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:42] [ns_1@127.0.0.1:<0.32556.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:43] [ns_1@127.0.0.1:<0.32650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:36:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32630.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:36:44] [ns_1@127.0.0.1:<0.32582.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:44] [ns_1@127.0.0.1:<0.32624.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:45] [ns_1@127.0.0.1:<0.32575.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32636.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32670.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:45] [ns_1@127.0.0.1:<0.32662.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:46] [ns_1@127.0.0.1:<0.32595.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:46] [ns_1@127.0.0.1:<0.32642.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:47] [ns_1@127.0.0.1:<0.32588.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:47] [ns_1@127.0.0.1:<0.32677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:48] [ns_1@127.0.0.1:<0.32611.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:48] [ns_1@127.0.0.1:<0.32655.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:49] [ns_1@127.0.0.1:<0.32604.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:49] [ns_1@127.0.0.1:<0.32688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:49] [ns_1@127.0.0.1:<0.32698.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:50] [ns_1@127.0.0.1:<0.32621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32670.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32704.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:50] [ns_1@127.0.0.1:<0.32667.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:50] [ns_1@127.0.0.1:<0.32698.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:51] [ns_1@127.0.0.1:<0.32615.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:51] [ns_1@127.0.0.1:<0.32701.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:51] [ns_1@127.0.0.1:<0.32698.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:52] [ns_1@127.0.0.1:<0.32639.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:52] [ns_1@127.0.0.1:<0.32681.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:52] [ns_1@127.0.0.1:<0.32698.0>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:53] [ns_1@127.0.0.1:<0.32626.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:53] [ns_1@127.0.0.1:<0.32719.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:36:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32698.0> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:36:54] [ns_1@127.0.0.1:<0.32652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:54] [ns_1@127.0.0.1:<0.32692.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:36:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32704.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:36:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32737.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:36:55] [ns_1@127.0.0.1:<0.32644.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:55] [ns_1@127.0.0.1:<0.32732.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:56] [ns_1@127.0.0.1:<0.32664.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:56] [ns_1@127.0.0.1:<0.32712.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:57] [ns_1@127.0.0.1:<0.32657.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:57] [ns_1@127.0.0.1:<0.32746.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:58] [ns_1@127.0.0.1:<0.32679.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:58] [ns_1@127.0.0.1:<0.32724.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:59] [ns_1@127.0.0.1:<0.32673.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:36:59] [ns_1@127.0.0.1:<0.32757.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:36:59] [ns_1@127.0.0.1:<0.0.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:36:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751015,239356}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37992384}, {processes,10416824}, {processes_used,8791344}, {system,27575560}, {atom,1306681}, {atom_used,1284164}, {binary,554032}, {code,12859877}, {ets,2116744}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2036}, {memory_data,{4040077312,4014329856,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24896 kB\nBuffers: 57732 kB\nCached: 3534040 
kB\nSwapCached: 0 kB\nActive: 305736 kB\nInactive: 3446728 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24896 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 68 kB\nWriteback: 0 kB\nAnonPages: 160696 kB\nMapped: 24868 kB\nSlab: 134432 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582560 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3618856960}, {buffered_memory,59117568}, {free_memory,25493504}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2031976,0}}, {context_switches,{845600,0}}, {garbage_collection,{435001,551361532,0}}, {io,{{input,20075959},{output,29049713}}}, {reductions,{187569170,649805}}, {run_queue,0}, {runtime,{33880,160}}]}]}] [error_logger:error] [2012-03-26 1:37:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32737.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:00] [ns_1@127.0.0.1:<0.32690.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:00] [ns_1@127.0.0.1:<0.32738.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:00] [ns_1@127.0.0.1:<0.0.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:01] [ns_1@127.0.0.1:<0.32683.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:01] [ns_1@127.0.0.1:<0.8.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:01] [ns_1@127.0.0.1:<0.0.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:02] [ns_1@127.0.0.1:<0.32708.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:02] [ns_1@127.0.0.1:<0.32751.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:02] [ns_1@127.0.0.1:<0.0.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:03] [ns_1@127.0.0.1:<0.32694.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, 
[{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:37:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.0.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:37:03] [ns_1@127.0.0.1:<0.20.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:04] [ns_1@127.0.0.1:<0.32721.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:04] [ns_1@127.0.0.1:<0.32761.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.38.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:05] [ns_1@127.0.0.1:<0.32714.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:05] [ns_1@127.0.0.1:<0.33.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:06] [ns_1@127.0.0.1:<0.32734.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:06] [ns_1@127.0.0.1:<0.13.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:07] [ns_1@127.0.0.1:<0.32726.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:07] [ns_1@127.0.0.1:<0.49.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:08] [ns_1@127.0.0.1:<0.32748.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:08] [ns_1@127.0.0.1:<0.26.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:08] [ns_1@127.0.0.1:<0.42.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:08] 
[ns_1@127.0.0.1:<0.53.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:09] [ns_1@127.0.0.1:<0.32742.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:09] [ns_1@127.0.0.1:<0.74.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:09] [ns_1@127.0.0.1:<0.59.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.38.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.80.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:10] [ns_1@127.0.0.1:<0.32759.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:10] [ns_1@127.0.0.1:<0.74.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:10] [ns_1@127.0.0.1:<0.68.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:11] [ns_1@127.0.0.1:<0.32753.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:11] [ns_1@127.0.0.1:<0.74.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:11] [ns_1@127.0.0.1:<0.83.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:12] [ns_1@127.0.0.1:<0.10.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:12] [ns_1@127.0.0.1:<0.74.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:12] [ns_1@127.0.0.1:<0.88.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:13] [ns_1@127.0.0.1:<0.32763.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:37:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.74.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from 
ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:37:13] [ns_1@127.0.0.1:<0.96.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:14] [ns_1@127.0.0.1:<0.22.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:14] [ns_1@127.0.0.1:<0.101.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.80.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.114.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:15] [ns_1@127.0.0.1:<0.15.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:15] [ns_1@127.0.0.1:<0.108.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:16] [ns_1@127.0.0.1:<0.35.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:16] [ns_1@127.0.0.1:<0.117.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:17] [ns_1@127.0.0.1:<0.31.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:18] [ns_1@127.0.0.1:<0.123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:18] [ns_1@127.0.0.1:<0.51.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:19] [ns_1@127.0.0.1:<0.127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:19] [ns_1@127.0.0.1:<0.47.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:19] [ns_1@127.0.0.1:<0.142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:20] [ns_1@127.0.0.1:<0.134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.114.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.148.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:20] [ns_1@127.0.0.1:<0.62.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:20] [ns_1@127.0.0.1:<0.142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:21] [ns_1@127.0.0.1:<0.138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:21] [ns_1@127.0.0.1:<0.57.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:21] [ns_1@127.0.0.1:<0.142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:22] [ns_1@127.0.0.1:<0.152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:22] [ns_1@127.0.0.1:<0.64.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:22] [ns_1@127.0.0.1:<0.142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:23] [ns_1@127.0.0.1:<0.158.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:23] [ns_1@127.0.0.1:<0.77.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:37:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.142.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:37:24] [ns_1@127.0.0.1:<0.165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:24] [ns_1@127.0.0.1:<0.66.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:25] [ns_1@127.0.0.1:<0.170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.148.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.183.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:25] [ns_1@127.0.0.1:<0.94.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:26] [ns_1@127.0.0.1:<0.178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:26] [ns_1@127.0.0.1:<0.86.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:27] [ns_1@127.0.0.1:<0.186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:27] [ns_1@127.0.0.1:<0.106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:28] [ns_1@127.0.0.1:<0.192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:28] [ns_1@127.0.0.1:<0.99.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:29] [ns_1@127.0.0.1:<0.197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:29] [ns_1@127.0.0.1:<0.121.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:29] [ns_1@127.0.0.1:<0.211.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.183.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.216.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:30] [ns_1@127.0.0.1:<0.203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:30] [ns_1@127.0.0.1:<0.111.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:30] [ns_1@127.0.0.1:<0.211.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:31] [ns_1@127.0.0.1:<0.207.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:31] [ns_1@127.0.0.1:<0.132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:31] [ns_1@127.0.0.1:<0.211.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:32] [ns_1@127.0.0.1:<0.221.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:32] [ns_1@127.0.0.1:<0.125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:32] [ns_1@127.0.0.1:<0.211.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:33] [ns_1@127.0.0.1:<0.226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:33] [ns_1@127.0.0.1:<0.145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:37:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.211.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:37:34] [ns_1@127.0.0.1:<0.233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:34] [ns_1@127.0.0.1:<0.136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.216.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.249.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:35] [ns_1@127.0.0.1:<0.239.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:35] [ns_1@127.0.0.1:<0.163.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:37:36] [ns_1@127.0.0.1:<0.246.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:36] [ns_1@127.0.0.1:<0.156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:37] [ns_1@127.0.0.1:<0.255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:37] [ns_1@127.0.0.1:<0.176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:38] [ns_1@127.0.0.1:<0.262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:38] [ns_1@127.0.0.1:<0.168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:39] [ns_1@127.0.0.1:<0.268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:39] [ns_1@127.0.0.1:<0.279.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:39] [ns_1@127.0.0.1:<0.190.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.249.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.285.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:40] [ns_1@127.0.0.1:<0.272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:40] [ns_1@127.0.0.1:<0.279.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:40] [ns_1@127.0.0.1:<0.180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:41] [ns_1@127.0.0.1:<0.282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:41] [ns_1@127.0.0.1:<0.279.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:41] [ns_1@127.0.0.1:<0.201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:42] [ns_1@127.0.0.1:<0.291.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:42] [ns_1@127.0.0.1:<0.279.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:42] [ns_1@127.0.0.1:<0.195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:43] [ns_1@127.0.0.1:<0.299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 1:37:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:37:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.279.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:37:43] [ns_1@127.0.0.1:<0.217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:44] [ns_1@127.0.0.1:<0.304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:44] [ns_1@127.0.0.1:<0.205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.285.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.319.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:45] [ns_1@127.0.0.1:<0.311.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:45] [ns_1@127.0.0.1:<0.231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:46] [ns_1@127.0.0.1:<0.316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:46] [ns_1@127.0.0.1:<0.224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:47] [ns_1@127.0.0.1:<0.326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:47] [ns_1@127.0.0.1:<0.244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:48] [ns_1@127.0.0.1:<0.330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:48] [ns_1@127.0.0.1:<0.237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:49] 
[ns_1@127.0.0.1:<0.337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:49] [ns_1@127.0.0.1:<0.347.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:49] [ns_1@127.0.0.1:<0.260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.319.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.353.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:50] [ns_1@127.0.0.1:<0.341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:50] [ns_1@127.0.0.1:<0.347.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:50] [ns_1@127.0.0.1:<0.253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:51] [ns_1@127.0.0.1:<0.350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:51] [ns_1@127.0.0.1:<0.347.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:51] [ns_1@127.0.0.1:<0.270.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:52] [ns_1@127.0.0.1:<0.361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:52] [ns_1@127.0.0.1:<0.347.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:53] [ns_1@127.0.0.1:<0.264.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:53] [ns_1@127.0.0.1:<0.368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:37:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.347.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, 
ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:37:54] [ns_1@127.0.0.1:<0.288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:54] [ns_1@127.0.0.1:<0.373.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:55] [ns_1@127.0.0.1:<0.275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:37:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.353.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:37:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.388.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:37:55] [ns_1@127.0.0.1:<0.381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:56] [ns_1@127.0.0.1:<0.301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:56] [ns_1@127.0.0.1:<0.385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:57] [ns_1@127.0.0.1:<0.293.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:57] [ns_1@127.0.0.1:<0.395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:58] [ns_1@127.0.0.1:<0.313.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:58] [ns_1@127.0.0.1:<0.400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:59] [ns_1@127.0.0.1:<0.306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:37:59] [ns_1@127.0.0.1:<0.406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:37:59] [ns_1@127.0.0.1:<0.417.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:37:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751075,269535}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37763424}, {processes,10211224}, {processes_used,8585744}, {system,27552200}, {atom,1306681}, {atom_used,1284164}, {binary,552104}, {code,12859877}, {ets,2088136}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, 
{swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2097}, {memory_data,{4040077312,4014710784,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24276 kB\nBuffers: 57824 kB\nCached: 3534204 kB\nSwapCached: 0 kB\nActive: 305956 kB\nInactive: 3446784 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24276 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160704 kB\nMapped: 24868 kB\nSlab: 134436 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582560 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3619024896}, {buffered_memory,59211776}, {free_memory,24858624}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2092004,0}}, {context_switches,{858592,0}}, {garbage_collection,{441988,562519115,0}}, {io,{{input,20106548},{output,29488513}}}, {reductions,{190148208,648652}}, {run_queue,0}, {runtime,{34500,150}}]}]}] [stats:error] [2012-03-26 1:38:00] [ns_1@127.0.0.1:<0.328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.388.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.424.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:00] [ns_1@127.0.0.1:<0.410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:00] [ns_1@127.0.0.1:<0.417.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:01] [ns_1@127.0.0.1:<0.322.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:01] [ns_1@127.0.0.1:<0.421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:01] [ns_1@127.0.0.1:<0.417.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:38:02] [ns_1@127.0.0.1:<0.339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:02] [ns_1@127.0.0.1:<0.430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:02] [ns_1@127.0.0.1:<0.417.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:03] [ns_1@127.0.0.1:<0.332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:03] [ns_1@127.0.0.1:<0.437.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:38:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.417.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:38:04] [ns_1@127.0.0.1:<0.357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:04] [ns_1@127.0.0.1:<0.443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.424.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.455.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:05] [ns_1@127.0.0.1:<0.343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:05] [ns_1@127.0.0.1:<0.450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:06] [ns_1@127.0.0.1:<0.370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:06] [ns_1@127.0.0.1:<0.456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:07] 
[ns_1@127.0.0.1:<0.363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:07] [ns_1@127.0.0.1:<0.466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:08] [ns_1@127.0.0.1:<0.383.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:08] [ns_1@127.0.0.1:<0.470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:09] [ns_1@127.0.0.1:<0.375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:09] [ns_1@127.0.0.1:<0.391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:09] [ns_1@127.0.0.1:<0.402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:09] [ns_1@127.0.0.1:<0.412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:09] [ns_1@127.0.0.1:<0.432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:09] [ns_1@127.0.0.1:<0.496.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:09] [ns_1@127.0.0.1:<0.476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.455.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.501.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:10] [ns_1@127.0.0.1:<0.397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:10] [ns_1@127.0.0.1:<0.496.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:10] [ns_1@127.0.0.1:<0.481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:11] [ns_1@127.0.0.1:<0.445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:11] [ns_1@127.0.0.1:<0.496.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:11] [ns_1@127.0.0.1:<0.504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:12] [ns_1@127.0.0.1:<0.408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:12] [ns_1@127.0.0.1:<0.496.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:12] 
[ns_1@127.0.0.1:<0.483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:13] [ns_1@127.0.0.1:<0.461.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:38:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.496.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:38:13] [ns_1@127.0.0.1:<0.517.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:14] [ns_1@127.0.0.1:<0.427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:14] [ns_1@127.0.0.1:<0.485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.501.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.535.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:15] [ns_1@127.0.0.1:<0.472.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:15] [ns_1@127.0.0.1:<0.529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:16] [ns_1@127.0.0.1:<0.439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:16] [ns_1@127.0.0.1:<0.487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:17] [ns_1@127.0.0.1:<0.495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:17] [ns_1@127.0.0.1:<0.544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:18] 
[ns_1@127.0.0.1:<0.452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:18] [ns_1@127.0.0.1:<0.489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:19] [ns_1@127.0.0.1:<0.514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:19] [ns_1@127.0.0.1:<0.563.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:19] [ns_1@127.0.0.1:<0.555.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.535.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.569.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:20] [ns_1@127.0.0.1:<0.468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:20] [ns_1@127.0.0.1:<0.563.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:20] [ns_1@127.0.0.1:<0.509.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:21] [ns_1@127.0.0.1:<0.527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:21] [ns_1@127.0.0.1:<0.563.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:21] [ns_1@127.0.0.1:<0.573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:22] [ns_1@127.0.0.1:<0.478.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:22] [ns_1@127.0.0.1:<0.563.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:22] [ns_1@127.0.0.1:<0.522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:23] [ns_1@127.0.0.1:<0.542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:38:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.563.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, 
list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:38:23] [ns_1@127.0.0.1:<0.586.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:24] [ns_1@127.0.0.1:<0.507.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:24] [ns_1@127.0.0.1:<0.538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.569.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.604.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:25] [ns_1@127.0.0.1:<0.553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:26] [ns_1@127.0.0.1:<0.599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:26] [ns_1@127.0.0.1:<0.520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:27] [ns_1@127.0.0.1:<0.548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:27] [ns_1@127.0.0.1:<0.566.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:28] [ns_1@127.0.0.1:<0.613.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:28] [ns_1@127.0.0.1:<0.532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:29] [ns_1@127.0.0.1:<0.559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:29] [ns_1@127.0.0.1:<0.584.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:29] [ns_1@127.0.0.1:<0.632.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:30] [ns_1@127.0.0.1:<0.624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} 
Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.604.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.639.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:30] [ns_1@127.0.0.1:<0.546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:30] [ns_1@127.0.0.1:<0.632.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:31] [ns_1@127.0.0.1:<0.579.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:31] [ns_1@127.0.0.1:<0.597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:31] [ns_1@127.0.0.1:<0.632.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:32] [ns_1@127.0.0.1:<0.642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:32] [ns_1@127.0.0.1:<0.557.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:32] [ns_1@127.0.0.1:<0.632.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:33] [ns_1@127.0.0.1:<0.591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:33] [ns_1@127.0.0.1:<0.611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:38:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.632.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:38:34] [ns_1@127.0.0.1:<0.654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:34] [ns_1@127.0.0.1:<0.577.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:35] [ns_1@127.0.0.1:<0.607.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.639.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.672.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:35] [ns_1@127.0.0.1:<0.622.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:36] [ns_1@127.0.0.1:<0.667.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:36] [ns_1@127.0.0.1:<0.589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:37] [ns_1@127.0.0.1:<0.618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:37] [ns_1@127.0.0.1:<0.636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:38] [ns_1@127.0.0.1:<0.683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:38] [ns_1@127.0.0.1:<0.601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:39] [ns_1@127.0.0.1:<0.628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:39] [ns_1@127.0.0.1:<0.652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:39] [ns_1@127.0.0.1:<0.702.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.672.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.706.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:40] [ns_1@127.0.0.1:<0.693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:40] [ns_1@127.0.0.1:<0.616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:40] [ns_1@127.0.0.1:<0.702.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:41] [ns_1@127.0.0.1:<0.647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:41] [ns_1@127.0.0.1:<0.665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:41] [ns_1@127.0.0.1:<0.702.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:42] [ns_1@127.0.0.1:<0.711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:42] [ns_1@127.0.0.1:<0.626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:42] [ns_1@127.0.0.1:<0.702.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:43] [ns_1@127.0.0.1:<0.660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:43] [ns_1@127.0.0.1:<0.681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:38:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.702.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:38:44] [ns_1@127.0.0.1:<0.724.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:44] [ns_1@127.0.0.1:<0.645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.706.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.740.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:45] [ns_1@127.0.0.1:<0.676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:38:45] [ns_1@127.0.0.1:<0.691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:46] [ns_1@127.0.0.1:<0.736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:46] [ns_1@127.0.0.1:<0.658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:47] [ns_1@127.0.0.1:<0.687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:47] [ns_1@127.0.0.1:<0.707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:48] [ns_1@127.0.0.1:<0.751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:48] [ns_1@127.0.0.1:<0.669.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:49] [ns_1@127.0.0.1:<0.698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:49] [ns_1@127.0.0.1:<0.768.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:49] [ns_1@127.0.0.1:<0.722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.740.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.774.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:50] [ns_1@127.0.0.1:<0.762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:50] [ns_1@127.0.0.1:<0.768.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:50] [ns_1@127.0.0.1:<0.685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:51] [ns_1@127.0.0.1:<0.716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:51] [ns_1@127.0.0.1:<0.768.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:51] [ns_1@127.0.0.1:<0.734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:52] [ns_1@127.0.0.1:<0.782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:52] [ns_1@127.0.0.1:<0.768.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:52] [ns_1@127.0.0.1:<0.696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
1:38:53] [ns_1@127.0.0.1:<0.729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:38:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.768.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:38:53] [ns_1@127.0.0.1:<0.749.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:54] [ns_1@127.0.0.1:<0.794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:54] [ns_1@127.0.0.1:<0.714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:38:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.774.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:38:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:38:55] [ns_1@127.0.0.1:<0.747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:55] [ns_1@127.0.0.1:<0.760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:56] [ns_1@127.0.0.1:<0.806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:56] [ns_1@127.0.0.1:<0.727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:57] [ns_1@127.0.0.1:<0.757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:57] [ns_1@127.0.0.1:<0.778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:58] [ns_1@127.0.0.1:<0.821.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:58] 
[ns_1@127.0.0.1:<0.741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:38:59] [ns_1@127.0.0.1:<0.771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:38:59] [ns_1@127.0.0.1:<0.851.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:38:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751135,298300}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37892632}, {processes,10298792}, {processes_used,8673312}, {system,27593840}, {atom,1306681}, {atom_used,1284164}, {binary,558112}, {code,12859877}, {ets,2116960}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2157}, {memory_data,{4040077312,4015218688,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24152 kB\nBuffers: 57920 kB\nCached: 3534364 kB\nSwapCached: 0 kB\nActive: 306104 kB\nInactive: 3446912 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24152 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 160724 kB\nMapped: 24868 kB\nSlab: 134436 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580304 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3619188736}, {buffered_memory,59310080}, {free_memory,24731648}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2152034,1}}, {context_switches,{871705,0}}, {garbage_collection,{449168,573911568,0}}, {io,{{input,20137155},{output,29934397}}}, {reductions,{192767661,651257}}, {run_queue,0}, {runtime,{35060,140}}]}]}] [stats:error] [2012-03-26 1:38:59] [ns_1@127.0.0.1:<0.791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:00] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.858.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:00] [ns_1@127.0.0.1:<0.831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:00] [ns_1@127.0.0.1:<0.851.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:00] [ns_1@127.0.0.1:<0.753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:01] [ns_1@127.0.0.1:<0.789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:01] [ns_1@127.0.0.1:<0.851.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:02] [ns_1@127.0.0.1:<0.804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:02] [ns_1@127.0.0.1:<0.864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:02] [ns_1@127.0.0.1:<0.851.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:03] [ns_1@127.0.0.1:<0.764.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:03] [ns_1@127.0.0.1:<0.802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:39:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.851.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:39:04] [ns_1@127.0.0.1:<0.818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:04] [ns_1@127.0.0.1:<0.877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:05] [ns_1@127.0.0.1:<0.784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.858.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.891.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:05] [ns_1@127.0.0.1:<0.816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:06] [ns_1@127.0.0.1:<0.829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:06] [ns_1@127.0.0.1:<0.888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:07] [ns_1@127.0.0.1:<0.796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:07] [ns_1@127.0.0.1:<0.827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:08] [ns_1@127.0.0.1:<0.861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:08] [ns_1@127.0.0.1:<0.904.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:09] [ns_1@127.0.0.1:<0.812.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:09] [ns_1@127.0.0.1:<0.855.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:09] [ns_1@127.0.0.1:<0.900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:09] [ns_1@127.0.0.1:<0.910.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:09] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:39:09] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:39:09] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:39:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:39:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 1:39:14] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
[ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:<0.873.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:<0.866.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:<0.957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:39:14] [ns_1@127.0.0.1:<0.879.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:39:14] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:39:16: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [error_logger:error] 
[2012-03-26 1:39:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.891.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:15] [ns_1@127.0.0.1:<0.895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:15] [ns_1@127.0.0.1:<0.957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:15] [ns_1@127.0.0.1:<0.886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:16] [ns_1@127.0.0.1:<0.902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:16] [ns_1@127.0.0.1:<0.957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:16] [ns_1@127.0.0.1:<0.970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:17] [ns_1@127.0.0.1:<0.906.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:17] [ns_1@127.0.0.1:<0.957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:17] [ns_1@127.0.0.1:<0.979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:18] [ns_1@127.0.0.1:<0.912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:18] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:39:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.957.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:39:18] [ns_1@127.0.0.1:<0.984.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:19] 
[ns_1@127.0.0.1:<0.917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:19] [ns_1@127.0.0.1:<0.992.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:19] [ns_1@127.0.0.1:<0.1003.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1007.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:20] [ns_1@127.0.0.1:<0.950.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:20] [ns_1@127.0.0.1:<0.1003.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:21] [ns_1@127.0.0.1:<0.1003.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:22] [ns_1@127.0.0.1:<0.1003.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 1:39:20] [ns_1@127.0.0.1:<0.997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1003.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:39:25] [ns_1@127.0.0.1:<0.919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1007.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1028.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:25] [ns_1@127.0.0.1:<0.922.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:26] [ns_1@127.0.0.1:<0.1008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:26] [ns_1@127.0.0.1:<0.951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:27] [ns_1@127.0.0.1:<0.1031.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:27] [ns_1@127.0.0.1:<0.925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:28] [ns_1@127.0.0.1:<0.1037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:28] [ns_1@127.0.0.1:<0.952.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:29] [ns_1@127.0.0.1:<0.1042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:29] [ns_1@127.0.0.1:<0.927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:29] [ns_1@127.0.0.1:<0.1056.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:30] [ns_1@127.0.0.1:<0.1048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1028.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1063.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:30] [ns_1@127.0.0.1:<0.953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:30] [ns_1@127.0.0.1:<0.1056.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:31] [ns_1@127.0.0.1:<0.1052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:31] [ns_1@127.0.0.1:<0.975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:31] 
[ns_1@127.0.0.1:<0.1056.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:32] [ns_1@127.0.0.1:<0.1066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:32] [ns_1@127.0.0.1:<0.961.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:32] [ns_1@127.0.0.1:<0.1056.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:33] [ns_1@127.0.0.1:<0.929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:33] [ns_1@127.0.0.1:<0.987.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:39:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1056.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:39:34] [ns_1@127.0.0.1:<0.1078.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:34] [ns_1@127.0.0.1:<0.982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1063.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1094.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:35] [ns_1@127.0.0.1:<0.1071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:35] [ns_1@127.0.0.1:<0.999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:36] [ns_1@127.0.0.1:<0.1091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:36] 
[ns_1@127.0.0.1:<0.995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:37] [ns_1@127.0.0.1:<0.1084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:37] [ns_1@127.0.0.1:<0.1035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:38] [ns_1@127.0.0.1:<0.1107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:38] [ns_1@127.0.0.1:<0.1025.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:39] [ns_1@127.0.0.1:<0.1100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:39] [ns_1@127.0.0.1:<0.1124.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:39] [ns_1@127.0.0.1:<0.1046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1094.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1130.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:40] [ns_1@127.0.0.1:<0.1014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:40] [ns_1@127.0.0.1:<0.1124.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:40] [ns_1@127.0.0.1:<0.1040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:41] [ns_1@127.0.0.1:<0.1113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:41] [ns_1@127.0.0.1:<0.1124.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:41] [ns_1@127.0.0.1:<0.1060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:42] [ns_1@127.0.0.1:<0.1117.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:42] [ns_1@127.0.0.1:<0.1124.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:42] [ns_1@127.0.0.1:<0.1050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:43] [ns_1@127.0.0.1:<0.1127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, 
{gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:39:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1124.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:39:43] [ns_1@127.0.0.1:<0.1076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:44] [ns_1@127.0.0.1:<0.1136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:44] [ns_1@127.0.0.1:<0.1069.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1130.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1164.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:45] [ns_1@127.0.0.1:<0.1144.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:45] [ns_1@127.0.0.1:<0.1089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:46] [ns_1@127.0.0.1:<0.1149.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:46] [ns_1@127.0.0.1:<0.1082.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:47] [ns_1@127.0.0.1:<0.1156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:47] [ns_1@127.0.0.1:<0.1105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:48] [ns_1@127.0.0.1:<0.1161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:48] [ns_1@127.0.0.1:<0.1095.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:49] [ns_1@127.0.0.1:<0.1171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:49] 
[ns_1@127.0.0.1:<0.1192.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:49] [ns_1@127.0.0.1:<0.1115.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1164.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1198.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:50] [ns_1@127.0.0.1:<0.1175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:50] [ns_1@127.0.0.1:<0.1192.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:50] [ns_1@127.0.0.1:<0.1109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:51] [ns_1@127.0.0.1:<0.1182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:51] [ns_1@127.0.0.1:<0.1192.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:51] [ns_1@127.0.0.1:<0.1133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:52] [ns_1@127.0.0.1:<0.1186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:52] [ns_1@127.0.0.1:<0.1192.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:53] [ns_1@127.0.0.1:<0.1120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:53] [ns_1@127.0.0.1:<0.1195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:39:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1192.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: 
running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:39:54] [ns_1@127.0.0.1:<0.1146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:54] [ns_1@127.0.0.1:<0.1206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:55] [ns_1@127.0.0.1:<0.1138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:39:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1198.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:39:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1233.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:39:55] [ns_1@127.0.0.1:<0.1213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:56] [ns_1@127.0.0.1:<0.1158.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:56] [ns_1@127.0.0.1:<0.1218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:57] [ns_1@127.0.0.1:<0.1151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:57] [ns_1@127.0.0.1:<0.1226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:58] [ns_1@127.0.0.1:<0.1173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:58] [ns_1@127.0.0.1:<0.1230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:59] [ns_1@127.0.0.1:<0.1167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:39:59] [ns_1@127.0.0.1:<0.1240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:39:59] [ns_1@127.0.0.1:<0.1262.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:39:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751195,327598}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37638928}, {processes,9971512}, {processes_used,8346032}, {system,27667416}, {atom,1306681}, {atom_used,1284164}, {binary,593096}, {code,12859877}, {ets,2148688}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, 
{os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2217}, {memory_data,{4040077312,4015345664,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 28144 kB\nBuffers: 58080 kB\nCached: 3531048 kB\nSwapCached: 0 kB\nActive: 304688 kB\nInactive: 3443964 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 28144 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 159544 kB\nMapped: 24868 kB\nSlab: 134412 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577792 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615793152}, {buffered_memory,59473920}, {free_memory,28819456}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2212062,1}}, {context_switches,{885012,0}}, {garbage_collection,{456547,585261400,0}}, {io,{{input,20419171},{output,30733668}}}, {reductions,{195499125,649972}}, {run_queue,0}, {runtime,{35670,140}}]}]}] [stats:error] [2012-03-26 1:40:00] [ns_1@127.0.0.1:<0.1184.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1233.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1269.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:00] [ns_1@127.0.0.1:<0.1245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:00] [ns_1@127.0.0.1:<0.1262.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:01] [ns_1@127.0.0.1:<0.1177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:01] [ns_1@127.0.0.1:<0.1251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:01] [ns_1@127.0.0.1:<0.1262.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:02] [ns_1@127.0.0.1:<0.1202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:40:02] [ns_1@127.0.0.1:<0.1255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:02] [ns_1@127.0.0.1:<0.1262.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:03] [ns_1@127.0.0.1:<0.1188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:03] [ns_1@127.0.0.1:<0.1266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:40:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1262.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:40:04] [ns_1@127.0.0.1:<0.1215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:04] [ns_1@127.0.0.1:<0.1275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:05] [ns_1@127.0.0.1:<0.1208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1269.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1302.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:05] [ns_1@127.0.0.1:<0.1282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:06] [ns_1@127.0.0.1:<0.1228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:06] [ns_1@127.0.0.1:<0.1288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:07] [ns_1@127.0.0.1:<0.1220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:07] 
[ns_1@127.0.0.1:<0.1295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:08] [ns_1@127.0.0.1:<0.1242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:08] [ns_1@127.0.0.1:<0.1299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:09] [ns_1@127.0.0.1:<0.1236.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:09] [ns_1@127.0.0.1:<0.1311.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:09] [ns_1@127.0.0.1:<0.1334.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1302.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1338.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:10] [ns_1@127.0.0.1:<0.1253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:10] [ns_1@127.0.0.1:<0.1315.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:10] [ns_1@127.0.0.1:<0.1334.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:11] [ns_1@127.0.0.1:<0.1247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:11] [ns_1@127.0.0.1:<0.1321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:11] [ns_1@127.0.0.1:<0.1334.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:12] [ns_1@127.0.0.1:<0.1272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:12] [ns_1@127.0.0.1:<0.1326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:12] [ns_1@127.0.0.1:<0.1334.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:13] [ns_1@127.0.0.1:<0.1257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:40:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial 
call: ns_janitor:cleanup/2 pid: <0.1334.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:40:13] [ns_1@127.0.0.1:<0.1339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:14] [ns_1@127.0.0.1:<0.1284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:14] [ns_1@127.0.0.1:<0.1346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1338.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1372.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:40:19] [ns_1@127.0.0.1:<0.1382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1372.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1386.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:40:20] [ns_1@127.0.0.1:<0.1382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:21] [ns_1@127.0.0.1:<0.1382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:22] [ns_1@127.0.0.1:<0.1382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, 
list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:40:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1382.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:40:24] [ns_1@127.0.0.1:<0.1359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1386.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1403.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:25] [ns_1@127.0.0.1:<0.1277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:25] [ns_1@127.0.0.1:<0.1354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:26] [ns_1@127.0.0.1:<0.1297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:26] [ns_1@127.0.0.1:<0.1404.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:27] [ns_1@127.0.0.1:<0.1290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:27] [ns_1@127.0.0.1:<0.1366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:28] [ns_1@127.0.0.1:<0.1313.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:28] [ns_1@127.0.0.1:<0.1417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:29] [ns_1@127.0.0.1:<0.1306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:29] [ns_1@127.0.0.1:<0.1431.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:29] [ns_1@127.0.0.1:<0.1412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1403.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1438.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:30] [ns_1@127.0.0.1:<0.1323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:30] [ns_1@127.0.0.1:<0.1431.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:30] [ns_1@127.0.0.1:<0.1427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:31] [ns_1@127.0.0.1:<0.1317.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:31] [ns_1@127.0.0.1:<0.1431.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:31] [ns_1@127.0.0.1:<0.1423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:32] [ns_1@127.0.0.1:<0.1343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:32] [ns_1@127.0.0.1:<0.1431.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:32] [ns_1@127.0.0.1:<0.1446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:33] [ns_1@127.0.0.1:<0.1328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:40:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1431.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:40:33] [ns_1@127.0.0.1:<0.1441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:34] [ns_1@127.0.0.1:<0.1356.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:40:34] [ns_1@127.0.0.1:<0.1459.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1438.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1471.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:35] [ns_1@127.0.0.1:<0.1348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:35] [ns_1@127.0.0.1:<0.1453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:36] [ns_1@127.0.0.1:<0.1368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:36] [ns_1@127.0.0.1:<0.1475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:37] [ns_1@127.0.0.1:<0.1364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:38] [ns_1@127.0.0.1:<0.1466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:38] [ns_1@127.0.0.1:<0.1400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:39] [ns_1@127.0.0.1:<0.1486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:39] [ns_1@127.0.0.1:<0.1410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:39] [ns_1@127.0.0.1:<0.1501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:40] [ns_1@127.0.0.1:<0.1482.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1471.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1507.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:40] [ns_1@127.0.0.1:<0.1415.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:40:40] [ns_1@127.0.0.1:<0.1501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:41] [ns_1@127.0.0.1:<0.1497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:41] [ns_1@127.0.0.1:<0.1421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:41] [ns_1@127.0.0.1:<0.1501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:42] [ns_1@127.0.0.1:<0.1492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:42] [ns_1@127.0.0.1:<0.1425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:42] [ns_1@127.0.0.1:<0.1501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:43] [ns_1@127.0.0.1:<0.1515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:43] [ns_1@127.0.0.1:<0.1435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:40:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1501.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:40:44] [ns_1@127.0.0.1:<0.1510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:44] [ns_1@127.0.0.1:<0.1444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:45] [ns_1@127.0.0.1:<0.1528.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1507.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1541.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:45] [ns_1@127.0.0.1:<0.1451.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:46] [ns_1@127.0.0.1:<0.1523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:46] [ns_1@127.0.0.1:<0.1457.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:47] [ns_1@127.0.0.1:<0.1544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:47] [ns_1@127.0.0.1:<0.1464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:48] [ns_1@127.0.0.1:<0.1535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:48] [ns_1@127.0.0.1:<0.1468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:49] [ns_1@127.0.0.1:<0.1554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:49] [ns_1@127.0.0.1:<0.1480.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:49] [ns_1@127.0.0.1:<0.1569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:50] [ns_1@127.0.0.1:<0.1550.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1541.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1575.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:50] [ns_1@127.0.0.1:<0.1484.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:50] [ns_1@127.0.0.1:<0.1569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:51] [ns_1@127.0.0.1:<0.1565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:51] [ns_1@127.0.0.1:<0.1490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:51] [ns_1@127.0.0.1:<0.1569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:52] [ns_1@127.0.0.1:<0.1561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:52] [ns_1@127.0.0.1:<0.1495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 1:40:52] [ns_1@127.0.0.1:<0.1569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:53] [ns_1@127.0.0.1:<0.1585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:53] [ns_1@127.0.0.1:<0.1504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:40:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1569.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:40:54] [ns_1@127.0.0.1:<0.1579.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:54] [ns_1@127.0.0.1:<0.1513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:40:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1575.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:40:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1608.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:40:55] [ns_1@127.0.0.1:<0.1597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:55] [ns_1@127.0.0.1:<0.1521.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:56] [ns_1@127.0.0.1:<0.1592.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:56] [ns_1@127.0.0.1:<0.1526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:57] [ns_1@127.0.0.1:<0.1613.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:57] [ns_1@127.0.0.1:<0.1533.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:58] 
[ns_1@127.0.0.1:<0.1605.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:58] [ns_1@127.0.0.1:<0.1538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:40:59] [ns_1@127.0.0.1:<0.1626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:40:59] [ns_1@127.0.0.1:<0.1637.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:40:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751255,355253}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38012304}, {processes,10320680}, {processes_used,8695200}, {system,27691624}, {atom,1306681}, {atom_used,1284164}, {binary,582144}, {code,12859877}, {ets,2177416}]}, {system_stats, [{cpu_utilization_rate,25.43640897755611}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2277}, {memory_data,{4040077312,4011257856,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 28020 kB\nBuffers: 58172 kB\nCached: 3531204 kB\nSwapCached: 0 kB\nActive: 304824 kB\nInactive: 3444144 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 28020 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 60 kB\nWriteback: 0 kB\nAnonPages: 159580 kB\nMapped: 24868 kB\nSlab: 134420 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 575380 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615952896}, {buffered_memory,59568128}, {free_memory,28692480}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2272092,0}}, {context_switches,{897139,0}}, {garbage_collection,{463432,595134166,0}}, {io,{{input,20449733},{output,31137737}}}, {reductions,{197892018,668651}}, {run_queue,0}, {runtime,{36190,140}}]}]}] [stats:error] [2012-03-26 1:40:59] [ns_1@127.0.0.1:<0.1548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1608.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1644.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:00] [ns_1@127.0.0.1:<0.1619.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:00] [ns_1@127.0.0.1:<0.1637.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:00] [ns_1@127.0.0.1:<0.1552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:01] [ns_1@127.0.0.1:<0.1640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:01] [ns_1@127.0.0.1:<0.1637.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:01] [ns_1@127.0.0.1:<0.1559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:02] [ns_1@127.0.0.1:<0.1630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:02] [ns_1@127.0.0.1:<0.1637.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:02] [ns_1@127.0.0.1:<0.1563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:03] [ns_1@127.0.0.1:<0.1657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:41:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1637.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:41:03] [ns_1@127.0.0.1:<0.1572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:04] [ns_1@127.0.0.1:<0.1650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:04] [ns_1@127.0.0.1:<0.1583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1644.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1677.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:05] [ns_1@127.0.0.1:<0.1670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:05] [ns_1@127.0.0.1:<0.1590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:06] [ns_1@127.0.0.1:<0.1663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:06] [ns_1@127.0.0.1:<0.1595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:07] [ns_1@127.0.0.1:<0.1686.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:07] [ns_1@127.0.0.1:<0.1603.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:08] [ns_1@127.0.0.1:<0.1674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:08] [ns_1@127.0.0.1:<0.1609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:09] [ns_1@127.0.0.1:<0.1696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:09] [ns_1@127.0.0.1:<0.1709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:09] [ns_1@127.0.0.1:<0.1617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1677.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1715.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:10] [ns_1@127.0.0.1:<0.1690.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:10] [ns_1@127.0.0.1:<0.1709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:10] [ns_1@127.0.0.1:<0.1622.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:11] [ns_1@127.0.0.1:<0.1712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:11] [ns_1@127.0.0.1:<0.1709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:11] [ns_1@127.0.0.1:<0.1628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:12] [ns_1@127.0.0.1:<0.1701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:12] [ns_1@127.0.0.1:<0.1709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:12] [ns_1@127.0.0.1:<0.1632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:13] [ns_1@127.0.0.1:<0.1729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:41:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1709.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:41:14] [ns_1@127.0.0.1:<0.1645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:14] [ns_1@127.0.0.1:<0.1721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:15] [ns_1@127.0.0.1:<0.1652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1715.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1749.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:15] [ns_1@127.0.0.1:<0.1741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
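Annotation: every exit in the CRASH and SUPERVISOR reports above has the shape {noproc, {gen_server, call, [Name, Request, Timeout]}}. That is the standard exit gen_server:call/3 produces when the registered name it is calling ('ns_memcached-default' here) does not exist on the node, which suggests the per-bucket memcached proxy for "default" is simply not running rather than timing out. A minimal sketch of that behaviour follows; the module name noproc_demo and the show/0 function are illustrative and are not part of ns_server.

%% noproc_demo.erl -- illustrative only; reproduces the exit shape seen in
%% the reports above by calling a gen_server name that is not registered.
-module(noproc_demo).
-export([show/0]).

show() ->
    %% 'ns_memcached-default' is not registered in a bare shell, so the call
    %% exits immediately with {noproc, {gen_server, call, [...]}}.
    try gen_server:call('ns_memcached-default', topkeys, 30000) of
        Reply -> {unexpected_reply, Reply}
    catch
        exit:{noproc, {gen_server, call, CallArgs}} ->
            {caught_noproc, CallArgs}
    end.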
[stats:error] [2012-03-26 1:41:16] [ns_1@127.0.0.1:<0.1659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:16] [ns_1@127.0.0.1:<0.1734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:17] [ns_1@127.0.0.1:<0.1665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:17] [ns_1@127.0.0.1:<0.1756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:18] [ns_1@127.0.0.1:<0.1672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:18] [ns_1@127.0.0.1:<0.1746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:19] [ns_1@127.0.0.1:<0.1681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:19] [ns_1@127.0.0.1:<0.1767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:19] [ns_1@127.0.0.1:<0.1777.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:20] [ns_1@127.0.0.1:<0.1688.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1749.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1783.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:20] [ns_1@127.0.0.1:<0.1760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:20] [ns_1@127.0.0.1:<0.1777.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:21] [ns_1@127.0.0.1:<0.1692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:21] [ns_1@127.0.0.1:<0.1780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:21] [ns_1@127.0.0.1:<0.1777.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:22] [ns_1@127.0.0.1:<0.1698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:22] [ns_1@127.0.0.1:<0.1771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:22] [ns_1@127.0.0.1:<0.1777.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:23] [ns_1@127.0.0.1:<0.1703.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:23] [ns_1@127.0.0.1:<0.1798.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:41:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1777.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:41:24] [ns_1@127.0.0.1:<0.1718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:24] [ns_1@127.0.0.1:<0.1791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:25] [ns_1@127.0.0.1:<0.1723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:25] [ns_1@127.0.0.1:<0.1736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:25] [ns_1@127.0.0.1:<0.1752.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1783.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1822.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:25] [ns_1@127.0.0.1:<0.1811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:26] [ns_1@127.0.0.1:<0.1731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:26] [ns_1@127.0.0.1:<0.1803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:27] [ns_1@127.0.0.1:<0.1762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:27] [ns_1@127.0.0.1:<0.1829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
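Annotation: the alternating SUPERVISOR REPORT / PROGRESS REPORT pairs repeat because hot_keys_keeper is declared as a permanent worker of menelaus_sup, so the supervisor restarts it after every noproc termination, and the next topkeys call fails the same way about five seconds later. The sketch below shows a child specification with the flags reported in the log ({mfargs,{hot_keys_keeper,start_link,[]}}, permanent, 5000 ms shutdown, worker); the supervisor name demo_sup, the one_for_one strategy, and the restart intensity are assumptions for illustration, not the real menelaus_sup settings.

%% demo_sup.erl -- illustrative supervisor; a 'permanent' worker is restarted
%% by its supervisor after every termination, which is why the child keeps
%% coming back in the progress reports above.
-module(demo_sup).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, demo_sup}, ?MODULE, []).

init([]) ->
    %% Child flags mirror the log: permanent restart, 5000 ms shutdown, worker.
    %% Starting this supervisor requires a real hot_keys_keeper module.
    {ok, {{one_for_one, 10, 10},
          [{hot_keys_keeper,
            {hot_keys_keeper, start_link, []},
            permanent, 5000, worker, [hot_keys_keeper]}]}}.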
[stats:error] [2012-03-26 1:41:28] [ns_1@127.0.0.1:<0.1743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:28] [ns_1@127.0.0.1:<0.1815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:29] [ns_1@127.0.0.1:<0.1773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:29] [ns_1@127.0.0.1:<0.1840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:29] [ns_1@127.0.0.1:<0.1850.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1822.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1855.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:30] [ns_1@127.0.0.1:<0.1758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:30] [ns_1@127.0.0.1:<0.1817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:30] [ns_1@127.0.0.1:<0.1850.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:31] [ns_1@127.0.0.1:<0.1793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:31] [ns_1@127.0.0.1:<0.1850.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:31] [ns_1@127.0.0.1:<0.1856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:32] [ns_1@127.0.0.1:<0.1769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:32] [ns_1@127.0.0.1:<0.1850.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:32] [ns_1@127.0.0.1:<0.1819.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:33] [ns_1@127.0.0.1:<0.1805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:41:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1850.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:41:33] [ns_1@127.0.0.1:<0.1870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:34] [ns_1@127.0.0.1:<0.1787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:34] [ns_1@127.0.0.1:<0.1834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1855.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1888.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:35] [ns_1@127.0.0.1:<0.1825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:35] [ns_1@127.0.0.1:<0.1883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:36] [ns_1@127.0.0.1:<0.1800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:36] [ns_1@127.0.0.1:<0.1844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:37] [ns_1@127.0.0.1:<0.1836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:37] [ns_1@127.0.0.1:<0.1899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:38] [ns_1@127.0.0.1:<0.1813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:38] [ns_1@127.0.0.1:<0.1863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:39] [ns_1@127.0.0.1:<0.1846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:39] [ns_1@127.0.0.1:<0.1918.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:39] [ns_1@127.0.0.1:<0.1909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1888.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1924.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:40] [ns_1@127.0.0.1:<0.1831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:40] [ns_1@127.0.0.1:<0.1918.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:40] [ns_1@127.0.0.1:<0.1876.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:41] [ns_1@127.0.0.1:<0.1868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:41] [ns_1@127.0.0.1:<0.1918.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:41] [ns_1@127.0.0.1:<0.1927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:42] [ns_1@127.0.0.1:<0.1842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:42] [ns_1@127.0.0.1:<0.1918.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:42] [ns_1@127.0.0.1:<0.1889.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:43] [ns_1@127.0.0.1:<0.1881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:41:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1918.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:41:43] [ns_1@127.0.0.1:<0.1940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:44] [ns_1@127.0.0.1:<0.1860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:44] 
[ns_1@127.0.0.1:<0.1903.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1924.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1958.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:45] [ns_1@127.0.0.1:<0.1897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:45] [ns_1@127.0.0.1:<0.1952.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:46] [ns_1@127.0.0.1:<0.1873.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:46] [ns_1@127.0.0.1:<0.1914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:47] [ns_1@127.0.0.1:<0.1907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:48] [ns_1@127.0.0.1:<0.1967.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:48] [ns_1@127.0.0.1:<0.1885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:49] [ns_1@127.0.0.1:<0.1932.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:49] [ns_1@127.0.0.1:<0.1921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:49] [ns_1@127.0.0.1:<0.1986.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:50] [ns_1@127.0.0.1:<0.1978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1958.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1992.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:50] [ns_1@127.0.0.1:<0.1901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:50] 
[ns_1@127.0.0.1:<0.1986.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:51] [ns_1@127.0.0.1:<0.1945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:51] [ns_1@127.0.0.1:<0.1938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:51] [ns_1@127.0.0.1:<0.1986.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:52] [ns_1@127.0.0.1:<0.1996.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:52] [ns_1@127.0.0.1:<0.1912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:52] [ns_1@127.0.0.1:<0.1986.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:53] [ns_1@127.0.0.1:<0.1961.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:53] [ns_1@127.0.0.1:<0.1950.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:41:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1986.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:41:54] [ns_1@127.0.0.1:<0.2009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:54] [ns_1@127.0.0.1:<0.1930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:55] [ns_1@127.0.0.1:<0.1971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:41:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1992.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:41:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2027.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:41:55] [ns_1@127.0.0.1:<0.1965.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:56] [ns_1@127.0.0.1:<0.2022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:56] [ns_1@127.0.0.1:<0.1943.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:57] [ns_1@127.0.0.1:<0.1982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:57] [ns_1@127.0.0.1:<0.1976.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:58] [ns_1@127.0.0.1:<0.2036.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:58] [ns_1@127.0.0.1:<0.1955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:59] [ns_1@127.0.0.1:<0.2002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:41:59] [ns_1@127.0.0.1:<0.1989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:41:59] [ns_1@127.0.0.1:<0.2071.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:41:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751315,381253}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37901928}, {processes,10225168}, {processes_used,8599688}, {system,27676760}, {atom,1306681}, {atom_used,1284164}, {binary,588656}, {code,12859877}, {ets,2149008}]}, {system_stats, [{cpu_utilization_rate,25.12562814070352}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2337}, {memory_data,{4040077312,4011638784,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27772 kB\nBuffers: 58248 kB\nCached: 3531356 kB\nSwapCached: 0 kB\nActive: 304968 kB\nInactive: 3444240 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27772 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 159596 kB\nMapped: 24868 kB\nSlab: 134440 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577988 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, 
{total_swap,6140452864}, {cached_memory,3616108544}, {buffered_memory,59645952}, {free_memory,28438528}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2332118,0}}, {context_switches,{910159,0}}, {garbage_collection,{470388,606300026,0}}, {io,{{input,20480322},{output,31577521}}}, {reductions,{200465846,632068}}, {run_queue,0}, {runtime,{36790,140}}]}]}] [stats:error] [2012-03-26 1:42:00] [ns_1@127.0.0.1:<0.2047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2027.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2078.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:00] [ns_1@127.0.0.1:<0.1969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:00] [ns_1@127.0.0.1:<0.2071.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:01] [ns_1@127.0.0.1:<0.2014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:01] [ns_1@127.0.0.1:<0.2007.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:01] [ns_1@127.0.0.1:<0.2071.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:02] [ns_1@127.0.0.1:<0.2081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:02] [ns_1@127.0.0.1:<0.1980.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:02] [ns_1@127.0.0.1:<0.2071.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:03] [ns_1@127.0.0.1:<0.2030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:03] [ns_1@127.0.0.1:<0.2020.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:42:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2071.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function 
gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:42:04] [ns_1@127.0.0.1:<0.2093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:04] [ns_1@127.0.0.1:<0.2000.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2078.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2109.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:05] [ns_1@127.0.0.1:<0.2041.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:05] [ns_1@127.0.0.1:<0.2034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:06] [ns_1@127.0.0.1:<0.2106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:06] [ns_1@127.0.0.1:<0.2012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:07] [ns_1@127.0.0.1:<0.2066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:07] [ns_1@127.0.0.1:<0.2045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:08] [ns_1@127.0.0.1:<0.2122.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:08] [ns_1@127.0.0.1:<0.2024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:09] [ns_1@127.0.0.1:<0.2086.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:09] [ns_1@127.0.0.1:<0.2141.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:09] [ns_1@127.0.0.1:<0.2075.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2109.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2147.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:10] [ns_1@127.0.0.1:<0.2132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:10] [ns_1@127.0.0.1:<0.2141.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:10] [ns_1@127.0.0.1:<0.2039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:11] [ns_1@127.0.0.1:<0.2099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:11] [ns_1@127.0.0.1:<0.2141.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:11] [ns_1@127.0.0.1:<0.2091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:12] [ns_1@127.0.0.1:<0.2153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:12] [ns_1@127.0.0.1:<0.2141.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:12] [ns_1@127.0.0.1:<0.2049.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:13] [ns_1@127.0.0.1:<0.2115.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:42:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2141.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:42:13] [ns_1@127.0.0.1:<0.2104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:14] [ns_1@127.0.0.1:<0.2166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:14] [ns_1@127.0.0.1:<0.2084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2147.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2181.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:15] [ns_1@127.0.0.1:<0.2128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:15] [ns_1@127.0.0.1:<0.2120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:16] [ns_1@127.0.0.1:<0.2178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:16] [ns_1@127.0.0.1:<0.2097.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:17] [ns_1@127.0.0.1:<0.2144.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:17] [ns_1@127.0.0.1:<0.2130.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:18] [ns_1@127.0.0.1:<0.2192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:18] [ns_1@127.0.0.1:<0.2110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:19] [ns_1@127.0.0.1:<0.2161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:19] [ns_1@127.0.0.1:<0.2209.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:19] [ns_1@127.0.0.1:<0.2148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2181.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2215.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:20] [ns_1@127.0.0.1:<0.2203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:20] [ns_1@127.0.0.1:<0.2209.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:20] [ns_1@127.0.0.1:<0.2124.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:21] [ns_1@127.0.0.1:<0.2173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:21] [ns_1@127.0.0.1:<0.2209.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:21] [ns_1@127.0.0.1:<0.2163.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:22] [ns_1@127.0.0.1:<0.2223.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:22] [ns_1@127.0.0.1:<0.2209.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:23] [ns_1@127.0.0.1:<0.2135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:23] [ns_1@127.0.0.1:<0.2188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:42:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2209.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:42:24] [ns_1@127.0.0.1:<0.2175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:24] [ns_1@127.0.0.1:<0.2235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:25] [ns_1@127.0.0.1:<0.2155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2215.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2250.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:25] [ns_1@127.0.0.1:<0.2199.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:42:25] [ns_1@127.0.0.1:<0.2212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:25] [ns_1@127.0.0.1:<0.2230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:26] [ns_1@127.0.0.1:<0.2190.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:26] [ns_1@127.0.0.1:<0.2247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:27] [ns_1@127.0.0.1:<0.2168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:27] [ns_1@127.0.0.1:<0.2243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:28] [ns_1@127.0.0.1:<0.2201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:28] [ns_1@127.0.0.1:<0.2266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:29] [ns_1@127.0.0.1:<0.2184.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:29] [ns_1@127.0.0.1:<0.2261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:29] [ns_1@127.0.0.1:<0.2282.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:30] [ns_1@127.0.0.1:<0.2219.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2250.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2289.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:30] [ns_1@127.0.0.1:<0.2276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:30] [ns_1@127.0.0.1:<0.2282.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:31] [ns_1@127.0.0.1:<0.2194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:31] [ns_1@127.0.0.1:<0.2272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:31] [ns_1@127.0.0.1:<0.2282.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:32] [ns_1@127.0.0.1:<0.2232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:32] [ns_1@127.0.0.1:<0.2295.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:32] [ns_1@127.0.0.1:<0.2282.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:33] [ns_1@127.0.0.1:<0.2205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:33] [ns_1@127.0.0.1:<0.2286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:42:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2282.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:42:34] [ns_1@127.0.0.1:<0.2245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:34] [ns_1@127.0.0.1:<0.2308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:35] [ns_1@127.0.0.1:<0.2225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2289.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:35] [ns_1@127.0.0.1:<0.2302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:36] [ns_1@127.0.0.1:<0.2263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:36] [ns_1@127.0.0.1:<0.2319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:37] [ns_1@127.0.0.1:<0.2237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:37] [ns_1@127.0.0.1:<0.2315.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:42:38] [ns_1@127.0.0.1:<0.2274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:38] [ns_1@127.0.0.1:<0.2335.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:39] [ns_1@127.0.0.1:<0.2253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:39] [ns_1@127.0.0.1:<0.2331.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:39] [ns_1@127.0.0.1:<0.2352.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2356.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:40] [ns_1@127.0.0.1:<0.2292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:40] [ns_1@127.0.0.1:<0.2346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:40] [ns_1@127.0.0.1:<0.2352.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:41] [ns_1@127.0.0.1:<0.2255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:41] [ns_1@127.0.0.1:<0.2352.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:41] [ns_1@127.0.0.1:<0.2341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:42] [ns_1@127.0.0.1:<0.2304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:42] [ns_1@127.0.0.1:<0.2352.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:42] [ns_1@127.0.0.1:<0.2364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:43] [ns_1@127.0.0.1:<0.2257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:42:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2352.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:42:43] [ns_1@127.0.0.1:<0.2357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:44] [ns_1@127.0.0.1:<0.2317.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:44] [ns_1@127.0.0.1:<0.2377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2356.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2390.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:45] [ns_1@127.0.0.1:<0.2268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:45] [ns_1@127.0.0.1:<0.2372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:46] [ns_1@127.0.0.1:<0.2333.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:46] [ns_1@127.0.0.1:<0.2391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:47] [ns_1@127.0.0.1:<0.2278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:47] [ns_1@127.0.0.1:<0.2384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:48] [ns_1@127.0.0.1:<0.2343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:48] [ns_1@127.0.0.1:<0.2403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:49] [ns_1@127.0.0.1:<0.2297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:49] [ns_1@127.0.0.1:<0.2418.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:49] [ns_1@127.0.0.1:<0.2399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2390.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2424.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:50] [ns_1@127.0.0.1:<0.2361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:50] [ns_1@127.0.0.1:<0.2418.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:50] [ns_1@127.0.0.1:<0.2414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:51] [ns_1@127.0.0.1:<0.2310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:51] [ns_1@127.0.0.1:<0.2418.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:51] [ns_1@127.0.0.1:<0.2410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:52] [ns_1@127.0.0.1:<0.2375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:52] [ns_1@127.0.0.1:<0.2418.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:52] [ns_1@127.0.0.1:<0.2434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:53] [ns_1@127.0.0.1:<0.2326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:42:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2418.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:42:53] [ns_1@127.0.0.1:<0.2428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:54] [ns_1@127.0.0.1:<0.2387.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:54] 
[ns_1@127.0.0.1:<0.2446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:42:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2424.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:42:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2459.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:42:55] [ns_1@127.0.0.1:<0.2337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:55] [ns_1@127.0.0.1:<0.2441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:56] [ns_1@127.0.0.1:<0.2401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:57] [ns_1@127.0.0.1:<0.2462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:57] [ns_1@127.0.0.1:<0.2348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:58] [ns_1@127.0.0.1:<0.2454.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:58] [ns_1@127.0.0.1:<0.2412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:59] [ns_1@127.0.0.1:<0.2473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:42:59] [ns_1@127.0.0.1:<0.2368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:42:59] [ns_1@127.0.0.1:<0.2488.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:42:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751375,410340}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37843992}, {processes,10136448}, {processes_used,8510968}, {system,27707544}, {atom,1306681}, {atom_used,1284164}, {binary,582048}, {code,12859877}, {ets,2180400}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2397}, 
{memory_data,{4040077312,4011638784,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27524 kB\nBuffers: 58392 kB\nCached: 3531504 kB\nSwapCached: 0 kB\nActive: 305044 kB\nInactive: 3444436 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27524 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 159612 kB\nMapped: 24868 kB\nSlab: 134424 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577988 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616260096}, {buffered_memory,59793408}, {free_memory,28184576}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2392144,0}}, {context_switches,{923540,0}}, {garbage_collection,{477760,617611590,0}}, {io,{{input,20757175},{output,32245269}}}, {reductions,{203121503,644447}}, {run_queue,0}, {runtime,{37360,140}}]}]}] [stats:error] [2012-03-26 1:43:00] [ns_1@127.0.0.1:<0.2468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2459.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2495.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:00] [ns_1@127.0.0.1:<0.2432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:00] [ns_1@127.0.0.1:<0.2488.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:01] [ns_1@127.0.0.1:<0.2483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:01] [ns_1@127.0.0.1:<0.2382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:01] [ns_1@127.0.0.1:<0.2488.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:02] [ns_1@127.0.0.1:<0.2479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:02] [ns_1@127.0.0.1:<0.2444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:02] [ns_1@127.0.0.1:<0.2488.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:03] 
[ns_1@127.0.0.1:<0.2503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:03] [ns_1@127.0.0.1:<0.2397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:43:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2488.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:43:04] [ns_1@127.0.0.1:<0.2498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:04] [ns_1@127.0.0.1:<0.2456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:05] [ns_1@127.0.0.1:<0.2516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2495.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2528.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:05] [ns_1@127.0.0.1:<0.2408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:06] [ns_1@127.0.0.1:<0.2510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:06] [ns_1@127.0.0.1:<0.2471.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:07] [ns_1@127.0.0.1:<0.2532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:07] [ns_1@127.0.0.1:<0.2421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:08] [ns_1@127.0.0.1:<0.2523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:08] 
[ns_1@127.0.0.1:<0.2481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:09] [ns_1@127.0.0.1:<0.2543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:09] [ns_1@127.0.0.1:<0.2439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:09] [ns_1@127.0.0.1:<0.2560.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:10] [ns_1@127.0.0.1:<0.2539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2528.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2566.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:10] [ns_1@127.0.0.1:<0.2501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:10] [ns_1@127.0.0.1:<0.2560.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:11] [ns_1@127.0.0.1:<0.2554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:11] [ns_1@127.0.0.1:<0.2452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:11] [ns_1@127.0.0.1:<0.2560.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:12] [ns_1@127.0.0.1:<0.2549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:12] [ns_1@127.0.0.1:<0.2514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:12] [ns_1@127.0.0.1:<0.2560.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:13] [ns_1@127.0.0.1:<0.2574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:13] [ns_1@127.0.0.1:<0.2466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:43:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2560.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:43:14] [ns_1@127.0.0.1:<0.2569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:14] [ns_1@127.0.0.1:<0.2525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2566.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2598.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:15] [ns_1@127.0.0.1:<0.2587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:15] [ns_1@127.0.0.1:<0.2477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:16] [ns_1@127.0.0.1:<0.2582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:16] [ns_1@127.0.0.1:<0.2541.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:17] [ns_1@127.0.0.1:<0.2603.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:17] [ns_1@127.0.0.1:<0.2492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:18] [ns_1@127.0.0.1:<0.2594.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:18] [ns_1@127.0.0.1:<0.2552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:19] [ns_1@127.0.0.1:<0.2615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:19] [ns_1@127.0.0.1:<0.2626.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:19] [ns_1@127.0.0.1:<0.2508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2598.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2632.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:20] [ns_1@127.0.0.1:<0.2609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:20] [ns_1@127.0.0.1:<0.2626.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:20] [ns_1@127.0.0.1:<0.2572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:21] [ns_1@127.0.0.1:<0.2629.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:21] [ns_1@127.0.0.1:<0.2626.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:21] [ns_1@127.0.0.1:<0.2521.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:22] [ns_1@127.0.0.1:<0.2620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:22] [ns_1@127.0.0.1:<0.2626.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:22] [ns_1@127.0.0.1:<0.2585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:23] [ns_1@127.0.0.1:<0.2647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:43:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2626.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:43:23] [ns_1@127.0.0.1:<0.2537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:24] [ns_1@127.0.0.1:<0.2640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:24] [ns_1@127.0.0.1:<0.2599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] 
[2012-03-26 1:43:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2632.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2667.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:25] [ns_1@127.0.0.1:<0.2660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:25] [ns_1@127.0.0.1:<0.2547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:25] [ns_1@127.0.0.1:<0.2563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:25] [ns_1@127.0.0.1:<0.2580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:26] [ns_1@127.0.0.1:<0.2652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:26] [ns_1@127.0.0.1:<0.2611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:27] [ns_1@127.0.0.1:<0.2674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:27] [ns_1@127.0.0.1:<0.2592.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:28] [ns_1@127.0.0.1:<0.2664.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:28] [ns_1@127.0.0.1:<0.2622.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:29] [ns_1@127.0.0.1:<0.2676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:29] [ns_1@127.0.0.1:<0.2699.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:29] [ns_1@127.0.0.1:<0.2607.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2667.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2706.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 
1:43:30] [ns_1@127.0.0.1:<0.2683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:30] [ns_1@127.0.0.1:<0.2699.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:30] [ns_1@127.0.0.1:<0.2642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:31] [ns_1@127.0.0.1:<0.2678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:31] [ns_1@127.0.0.1:<0.2699.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:32] [ns_1@127.0.0.1:<0.2618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:32] [ns_1@127.0.0.1:<0.2693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:32] [ns_1@127.0.0.1:<0.2699.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:33] [ns_1@127.0.0.1:<0.2654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:33] [ns_1@127.0.0.1:<0.2689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:43:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2699.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:43:34] [ns_1@127.0.0.1:<0.2633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:34] [ns_1@127.0.0.1:<0.2712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:35] [ns_1@127.0.0.1:<0.2668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2706.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2739.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:35] [ns_1@127.0.0.1:<0.2703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:36] [ns_1@127.0.0.1:<0.2649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:36] [ns_1@127.0.0.1:<0.2725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:37] [ns_1@127.0.0.1:<0.2685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:37] [ns_1@127.0.0.1:<0.2719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:38] [ns_1@127.0.0.1:<0.2662.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:38] [ns_1@127.0.0.1:<0.2736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:39] [ns_1@127.0.0.1:<0.2695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:39] [ns_1@127.0.0.1:<0.2732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:39] [ns_1@127.0.0.1:<0.2769.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:40] [ns_1@127.0.0.1:<0.2680.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2739.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2775.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:40] [ns_1@127.0.0.1:<0.2752.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:40] [ns_1@127.0.0.1:<0.2769.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:41] [ns_1@127.0.0.1:<0.2714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:41] [ns_1@127.0.0.1:<0.2748.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:41] [ns_1@127.0.0.1:<0.2769.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:42] [ns_1@127.0.0.1:<0.2691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:43:42] [ns_1@127.0.0.1:<0.2763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:42] [ns_1@127.0.0.1:<0.2769.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:43] [ns_1@127.0.0.1:<0.2727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:43] [ns_1@127.0.0.1:<0.2758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:43:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2769.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:43:44] [ns_1@127.0.0.1:<0.2709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:44] [ns_1@127.0.0.1:<0.2781.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:45] [ns_1@127.0.0.1:<0.2743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2775.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:45] [ns_1@127.0.0.1:<0.2772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:46] [ns_1@127.0.0.1:<0.2721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:46] [ns_1@127.0.0.1:<0.2794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:47] [ns_1@127.0.0.1:<0.2754.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:47] 
[ns_1@127.0.0.1:<0.2789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:48] [ns_1@127.0.0.1:<0.2734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:48] [ns_1@127.0.0.1:<0.2806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:49] [ns_1@127.0.0.1:<0.2765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:49] [ns_1@127.0.0.1:<0.2801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:49] [ns_1@127.0.0.1:<0.2837.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2841.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:50] [ns_1@127.0.0.1:<0.2750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:50] [ns_1@127.0.0.1:<0.2820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:50] [ns_1@127.0.0.1:<0.2837.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:51] [ns_1@127.0.0.1:<0.2783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:51] [ns_1@127.0.0.1:<0.2816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:51] [ns_1@127.0.0.1:<0.2837.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:52] [ns_1@127.0.0.1:<0.2760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:52] [ns_1@127.0.0.1:<0.2837.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:52] [ns_1@127.0.0.1:<0.2831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:53] [ns_1@127.0.0.1:<0.2796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:43:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial 
call: ns_janitor:cleanup/2 pid: <0.2837.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:43:53] [ns_1@127.0.0.1:<0.2827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:54] [ns_1@127.0.0.1:<0.2778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:54] [ns_1@127.0.0.1:<0.2851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:43:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2841.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:43:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2876.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:43:55] [ns_1@127.0.0.1:<0.2812.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:55] [ns_1@127.0.0.1:<0.2842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:56] [ns_1@127.0.0.1:<0.2791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:56] [ns_1@127.0.0.1:<0.2863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:57] [ns_1@127.0.0.1:<0.2822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:57] [ns_1@127.0.0.1:<0.2858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:58] [ns_1@127.0.0.1:<0.2803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:58] [ns_1@127.0.0.1:<0.2877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:43:59] [ns_1@127.0.0.1:<0.2833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:43:59] [ns_1@127.0.0.1:<0.2905.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:43:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751435,441351}}, 
{outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37830576}, {processes,10138312}, {processes_used,8512832}, {system,27692264}, {atom,1306681}, {atom_used,1284164}, {binary,588080}, {code,12859877}, {ets,2151760}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2457}, {memory_data,{4040077312,4011892736,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27400 kB\nBuffers: 58468 kB\nCached: 3531664 kB\nSwapCached: 0 kB\nActive: 305484 kB\nInactive: 3444620 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27400 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 159920 kB\nMapped: 24868 kB\nSlab: 134388 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577988 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616423936}, {buffered_memory,59871232}, {free_memory,28057600}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2452178,0}}, {context_switches,{936612,0}}, {garbage_collection,{485029,628930325,0}}, {io,{{input,20787764},{output,32686679}}}, {reductions,{205748438,664327}}, {run_queue,0}, {runtime,{37940,150}}]}]}] [stats:error] [2012-03-26 1:43:59] [ns_1@127.0.0.1:<0.2871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2876.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:00] [ns_1@127.0.0.1:<0.2818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:00] [ns_1@127.0.0.1:<0.2905.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:00] [ns_1@127.0.0.1:<0.2890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:01] [ns_1@127.0.0.1:<0.2853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:01] [ns_1@127.0.0.1:<0.2905.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:01] [ns_1@127.0.0.1:<0.2885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:02] [ns_1@127.0.0.1:<0.2829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:02] [ns_1@127.0.0.1:<0.2905.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:02] [ns_1@127.0.0.1:<0.2900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:03] [ns_1@127.0.0.1:<0.2868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:44:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2905.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:44:03] [ns_1@127.0.0.1:<0.2896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:04] [ns_1@127.0.0.1:<0.2847.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:04] [ns_1@127.0.0.1:<0.2920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2945.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 1:44:05] [ns_1@127.0.0.1:<0.2883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:05] [ns_1@127.0.0.1:<0.2913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:06] [ns_1@127.0.0.1:<0.2861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:06] [ns_1@127.0.0.1:<0.2933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:07] [ns_1@127.0.0.1:<0.2894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:08] [ns_1@127.0.0.1:<0.2927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:08] [ns_1@127.0.0.1:<0.2873.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:09] [ns_1@127.0.0.1:<0.2949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:09] [ns_1@127.0.0.1:<0.2909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:09] [ns_1@127.0.0.1:<0.2977.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:10] [ns_1@127.0.0.1:<0.2940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2945.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2983.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:10] [ns_1@127.0.0.1:<0.2888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:10] [ns_1@127.0.0.1:<0.2977.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:11] [ns_1@127.0.0.1:<0.2960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:11] [ns_1@127.0.0.1:<0.2925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:11] [ns_1@127.0.0.1:<0.2977.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:12] [ns_1@127.0.0.1:<0.2956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:12] [ns_1@127.0.0.1:<0.2898.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:12] [ns_1@127.0.0.1:<0.2977.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:13] [ns_1@127.0.0.1:<0.2971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:13] [ns_1@127.0.0.1:<0.2938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:44:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2977.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:44:14] [ns_1@127.0.0.1:<0.2966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:14] [ns_1@127.0.0.1:<0.2918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:15] [ns_1@127.0.0.1:<0.2991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2983.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3017.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:15] [ns_1@127.0.0.1:<0.2954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:16] [ns_1@127.0.0.1:<0.2986.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:16] [ns_1@127.0.0.1:<0.2931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:17] [ns_1@127.0.0.1:<0.3004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:17] [ns_1@127.0.0.1:<0.2964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:18] [ns_1@127.0.0.1:<0.2999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:44:18] [ns_1@127.0.0.1:<0.2942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:19] [ns_1@127.0.0.1:<0.3020.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:19] [ns_1@127.0.0.1:<0.2980.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:19] [ns_1@127.0.0.1:<0.3045.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:20] [ns_1@127.0.0.1:<0.3011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3017.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3051.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:20] [ns_1@127.0.0.1:<0.2958.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:20] [ns_1@127.0.0.1:<0.3045.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:21] [ns_1@127.0.0.1:<0.3030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:21] [ns_1@127.0.0.1:<0.2997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:21] [ns_1@127.0.0.1:<0.3045.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:22] [ns_1@127.0.0.1:<0.3026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:22] [ns_1@127.0.0.1:<0.2969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:22] [ns_1@127.0.0.1:<0.3045.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:23] [ns_1@127.0.0.1:<0.3041.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:23] [ns_1@127.0.0.1:<0.3009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:44:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3045.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:44:24] [ns_1@127.0.0.1:<0.3037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:24] [ns_1@127.0.0.1:<0.2989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3051.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3084.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:25] [ns_1@127.0.0.1:<0.3061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:25] [ns_1@127.0.0.1:<0.3024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:26] [ns_1@127.0.0.1:<0.3055.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:26] [ns_1@127.0.0.1:<0.3068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:26] [ns_1@127.0.0.1:<0.3081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:26] [ns_1@127.0.0.1:<0.3002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:26] [ns_1@127.0.0.1:<0.3014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:26] [ns_1@127.0.0.1:<0.3028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:27] [ns_1@127.0.0.1:<0.3073.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:27] [ns_1@127.0.0.1:<0.3035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:28] [ns_1@127.0.0.1:<0.3103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:28] [ns_1@127.0.0.1:<0.3039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:29] [ns_1@127.0.0.1:<0.3089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
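The SUPERVISOR and CRASH reports above all give the same reason: a gen_server:call/3 to the registered process 'ns_memcached-default' (topkeys from hot_keys_keeper, list_vbuckets_prevstate from ns_janitor) exits with {noproc, ...} because that process is not running while the bucket warms up, so the call fails immediately instead of waiting out its 30000 ms timeout. Below is a minimal sketch of that failure mode using only standard OTP calls; the module name noproc_demo is invented for illustration and is not part of ns_server.

```erlang
%% Hypothetical demo module, not part of ns_server: shows how a
%% gen_server:call/3 to a registered name that is not alive exits with
%% {noproc, {gen_server, call, [...]}} -- the reason seen in the reports above.
-module(noproc_demo).
-export([call_missing_server/0]).

call_missing_server() ->
    try
        %% 'ns_memcached-default' is not registered here, so this exits
        %% immediately with noproc instead of waiting 30000 ms.
        gen_server:call({'ns_memcached-default', node()},
                        list_vbuckets_prevstate, 30000)
    catch
        exit:{noproc, Details} ->
            {error, {noproc, Details}}
    end.
```

Because hot_keys_keeper is a permanent child of menelaus_sup, each such exit shows up as a child_terminated supervisor report followed by a progress report for the restarted worker, which is the loop repeating through the rest of this log.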
[ns_server:info] [2012-03-26 1:44:29] [ns_1@127.0.0.1:<0.3120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:29] [ns_1@127.0.0.1:<0.3048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3084.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3127.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:30] [ns_1@127.0.0.1:<0.3114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:30] [ns_1@127.0.0.1:<0.3120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:30] [ns_1@127.0.0.1:<0.3059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:31] [ns_1@127.0.0.1:<0.3110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:31] [ns_1@127.0.0.1:<0.3120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:31] [ns_1@127.0.0.1:<0.3066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:32] [ns_1@127.0.0.1:<0.3133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:32] [ns_1@127.0.0.1:<0.3120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:32] [ns_1@127.0.0.1:<0.3071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:33] [ns_1@127.0.0.1:<0.3124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:44:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3120.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:44:33] [ns_1@127.0.0.1:<0.3079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:34] [ns_1@127.0.0.1:<0.3146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:34] [ns_1@127.0.0.1:<0.3085.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3127.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3160.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:35] [ns_1@127.0.0.1:<0.3140.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:35] [ns_1@127.0.0.1:<0.3093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:36] [ns_1@127.0.0.1:<0.3157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:36] [ns_1@127.0.0.1:<0.3106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:37] [ns_1@127.0.0.1:<0.3153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:37] [ns_1@127.0.0.1:<0.3095.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:38] [ns_1@127.0.0.1:<0.3173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:38] [ns_1@127.0.0.1:<0.3116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:39] [ns_1@127.0.0.1:<0.3169.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:39] [ns_1@127.0.0.1:<0.3190.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:39] [ns_1@127.0.0.1:<0.3097.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3160.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:40] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3196.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:40] [ns_1@127.0.0.1:<0.3184.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:40] [ns_1@127.0.0.1:<0.3190.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:40] [ns_1@127.0.0.1:<0.3135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:41] [ns_1@127.0.0.1:<0.3179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:41] [ns_1@127.0.0.1:<0.3190.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:41] [ns_1@127.0.0.1:<0.3099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:42] [ns_1@127.0.0.1:<0.3202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:42] [ns_1@127.0.0.1:<0.3190.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:43] [ns_1@127.0.0.1:<0.3148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:43] [ns_1@127.0.0.1:<0.3193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:44:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3190.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:44:44] [ns_1@127.0.0.1:<0.3101.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:44] [ns_1@127.0.0.1:<0.3215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:45] [ns_1@127.0.0.1:<0.3161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3196.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3230.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:45] [ns_1@127.0.0.1:<0.3210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:46] [ns_1@127.0.0.1:<0.3112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:46] [ns_1@127.0.0.1:<0.3227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:47] [ns_1@127.0.0.1:<0.3175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:47] [ns_1@127.0.0.1:<0.3222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:48] [ns_1@127.0.0.1:<0.3128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:48] [ns_1@127.0.0.1:<0.3241.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:49] [ns_1@127.0.0.1:<0.3186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:49] [ns_1@127.0.0.1:<0.3237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:49] [ns_1@127.0.0.1:<0.3258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:50] [ns_1@127.0.0.1:<0.3142.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3230.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3264.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:50] [ns_1@127.0.0.1:<0.3252.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:50] [ns_1@127.0.0.1:<0.3258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:51] [ns_1@127.0.0.1:<0.3204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:51] [ns_1@127.0.0.1:<0.3248.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:51] [ns_1@127.0.0.1:<0.3258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:52] [ns_1@127.0.0.1:<0.3155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:52] [ns_1@127.0.0.1:<0.3272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:52] [ns_1@127.0.0.1:<0.3258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:53] [ns_1@127.0.0.1:<0.3217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:53] [ns_1@127.0.0.1:<0.3261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:44:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3258.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:44:54] [ns_1@127.0.0.1:<0.3171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:54] [ns_1@127.0.0.1:<0.3284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:55] [ns_1@127.0.0.1:<0.3233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:44:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3264.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:44:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3299.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:44:55] [ns_1@127.0.0.1:<0.3279.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:56] [ns_1@127.0.0.1:<0.3181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:56] [ns_1@127.0.0.1:<0.3296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:57] [ns_1@127.0.0.1:<0.3243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:57] [ns_1@127.0.0.1:<0.3292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:58] [ns_1@127.0.0.1:<0.3199.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:58] [ns_1@127.0.0.1:<0.3311.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:59] [ns_1@127.0.0.1:<0.3254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:44:59] [ns_1@127.0.0.1:<0.3306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:44:59] [ns_1@127.0.0.1:<0.3335.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:44:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751495,470355}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37778136}, {processes,10051896}, {processes_used,8426416}, {system,27726240}, {atom,1306681}, {atom_used,1284164}, {binary,586552}, {code,12859877}, {ets,2180832}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2517}, {memory_data,{4040077312,4012019712,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27276 kB\nBuffers: 58540 kB\nCached: 3531824 kB\nSwapCached: 0 kB\nActive: 305300 kB\nInactive: 3444684 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27276 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 159632 kB\nMapped: 24868 kB\nSlab: 134392 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577988 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616587776}, {buffered_memory,59944960}, {free_memory,27930624}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2512206,0}}, {context_switches,{949694,0}}, {garbage_collection,{492121,640256314,0}}, 
{io,{{input,20818371},{output,33132363}}}, {reductions,{208357551,634543}}, {run_queue,0}, {runtime,{38510,140}}]}]}] [error_logger:error] [2012-03-26 1:45:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3299.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:45:00] [ns_1@127.0.0.1:<0.3335.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:01] [ns_1@127.0.0.1:<0.3335.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:02] [ns_1@127.0.0.1:<0.3335.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 1:45:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3340.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:45:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:45:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3335.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:45:04] [ns_1@127.0.0.1:<0.3317.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:04] [ns_1@127.0.0.1:<0.3239.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:05] [ns_1@127.0.0.1:<0.3321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3340.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3365.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:05] [ns_1@127.0.0.1:<0.3274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:06] [ns_1@127.0.0.1:<0.3212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:06] [ns_1@127.0.0.1:<0.3250.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:07] [ns_1@127.0.0.1:<0.3369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:07] [ns_1@127.0.0.1:<0.3286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:08] [ns_1@127.0.0.1:<0.3360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:08] [ns_1@127.0.0.1:<0.3268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:09] [ns_1@127.0.0.1:<0.3380.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:09] [ns_1@127.0.0.1:<0.3302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:09] [ns_1@127.0.0.1:<0.3397.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:10] [ns_1@127.0.0.1:<0.3224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3365.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3403.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:10] [ns_1@127.0.0.1:<0.3281.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:10] [ns_1@127.0.0.1:<0.3397.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:11] [ns_1@127.0.0.1:<0.3391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:11] [ns_1@127.0.0.1:<0.3313.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:11] [ns_1@127.0.0.1:<0.3397.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:12] [ns_1@127.0.0.1:<0.3376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:12] 
[ns_1@127.0.0.1:<0.3294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:12] [ns_1@127.0.0.1:<0.3397.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:13] [ns_1@127.0.0.1:<0.3411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:13] [ns_1@127.0.0.1:<0.3330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:45:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3397.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:45:14] [ns_1@127.0.0.1:<0.3386.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:14] [ns_1@127.0.0.1:<0.3308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:15] [ns_1@127.0.0.1:<0.3424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3403.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3437.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:15] [ns_1@127.0.0.1:<0.3358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:16] [ns_1@127.0.0.1:<0.3406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:16] [ns_1@127.0.0.1:<0.3319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:17] [ns_1@127.0.0.1:<0.3440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:17] 
[ns_1@127.0.0.1:<0.3374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:18] [ns_1@127.0.0.1:<0.3419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:18] [ns_1@127.0.0.1:<0.3362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:19] [ns_1@127.0.0.1:<0.3450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:19] [ns_1@127.0.0.1:<0.3384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:19] [ns_1@127.0.0.1:<0.3465.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3437.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3469.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:20] [ns_1@127.0.0.1:<0.3431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:20] [ns_1@127.0.0.1:<0.3378.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:20] [ns_1@127.0.0.1:<0.3465.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:21] [ns_1@127.0.0.1:<0.3461.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:21] [ns_1@127.0.0.1:<0.3465.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:21] [ns_1@127.0.0.1:<0.3400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:22] [ns_1@127.0.0.1:<0.3446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:22] [ns_1@127.0.0.1:<0.3465.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:22] [ns_1@127.0.0.1:<0.3389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:23] [ns_1@127.0.0.1:<0.3484.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:45:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial 
call: ns_janitor:cleanup/2 pid: <0.3465.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:45:23] [ns_1@127.0.0.1:<0.3417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:24] [ns_1@127.0.0.1:<0.3457.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:24] [ns_1@127.0.0.1:<0.3409.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3469.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3504.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:25] [ns_1@127.0.0.1:<0.3496.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:25] [ns_1@127.0.0.1:<0.3429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:26] [ns_1@127.0.0.1:<0.3475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:26] [ns_1@127.0.0.1:<0.3422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:26] [ns_1@127.0.0.1:<0.3434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:26] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:45:26] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:45:26] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:45:26] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:45:26] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:warn] [2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
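Each restart cycle in the reports above pairs a SUPERVISOR REPORT (Context: child_terminated) with a PROGRESS REPORT for a fresh hot_keys_keeper pid, because the child is declared under menelaus_sup with {restart_type,permanent}, {shutdown,5000} and {child_type,worker}. Below is a minimal sketch of a supervisor declaring a child with those same flags; demo_sup and its restart intensity (10 restarts per 10 seconds) are invented for the sketch and are not the real menelaus_sup implementation.

```erlang
%% Hypothetical sketch, not the real menelaus_sup: a one_for_one supervisor
%% whose child spec mirrors the flags printed in the reports above
%% (permanent restart, 5000 ms shutdown, worker).
-module(demo_sup).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    ChildSpec = {hot_keys_keeper,                   % child id, as reported
                 {hot_keys_keeper, start_link, []}, % start MFA from the reports
                 permanent,                         % restart even after crashes
                 5000,                              % shutdown timeout in ms
                 worker,
                 [hot_keys_keeper]},
    {ok, {{one_for_one, 10, 10}, [ChildSpec]}}.
```

With the sketch's intensity of 10 restarts per 10 seconds and a child failing roughly every five seconds, as here, the limit is never exceeded, so the worker is restarted indefinitely and the report pairs keep repeating.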
[ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.3448.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.3470.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.3550.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.3486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:45:33: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] 
[2012-03-26 1:45:31] [ns_1@127.0.0.1:<0.3499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:32] [ns_1@127.0.0.1:<0.3550.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:32] [ns_1@127.0.0.1:<0.3459.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:32] [ns_1@127.0.0.1:<0.3479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:33] [ns_1@127.0.0.1:<0.3550.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:33] [ns_1@127.0.0.1:<0.3562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:33] [ns_1@127.0.0.1:<0.3513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:34] [ns_1@127.0.0.1:<0.3550.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:34] [ns_1@127.0.0.1:<0.3568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:34] [ns_1@127.0.0.1:<0.3491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3504.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3583.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:45:35] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:45:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3550.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:45:35] [ns_1@127.0.0.1:<0.3575.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:36] 
[ns_1@127.0.0.1:<0.3515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:36] [ns_1@127.0.0.1:<0.3580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:37] [ns_1@127.0.0.1:<0.3505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:37] [ns_1@127.0.0.1:<0.3593.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:38] [ns_1@127.0.0.1:<0.3518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:38] [ns_1@127.0.0.1:<0.3597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:39] [ns_1@127.0.0.1:<0.3543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:39] [ns_1@127.0.0.1:<0.3603.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:39] [ns_1@127.0.0.1:<0.3614.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:40] [ns_1@127.0.0.1:<0.3521.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3583.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3620.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:40] [ns_1@127.0.0.1:<0.3608.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:40] [ns_1@127.0.0.1:<0.3614.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:41] [ns_1@127.0.0.1:<0.3544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:41] [ns_1@127.0.0.1:<0.3617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:41] [ns_1@127.0.0.1:<0.3614.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:42] [ns_1@127.0.0.1:<0.3523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:42] [ns_1@127.0.0.1:<0.3626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:42] [ns_1@127.0.0.1:<0.3614.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:43] [ns_1@127.0.0.1:<0.3545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:45:43] [ns_1@127.0.0.1:<0.3634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:45:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3614.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:45:44] [ns_1@127.0.0.1:<0.3525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:44] [ns_1@127.0.0.1:<0.3639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:45] [ns_1@127.0.0.1:<0.3546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3620.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3654.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:45] [ns_1@127.0.0.1:<0.3646.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:46] [ns_1@127.0.0.1:<0.3565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:46] [ns_1@127.0.0.1:<0.3651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:47] [ns_1@127.0.0.1:<0.3552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:47] [ns_1@127.0.0.1:<0.3661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:48] [ns_1@127.0.0.1:<0.3578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:48] [ns_1@127.0.0.1:<0.3665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:49] 
[ns_1@127.0.0.1:<0.3571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:49] [ns_1@127.0.0.1:<0.3672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:49] [ns_1@127.0.0.1:<0.3682.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:50] [ns_1@127.0.0.1:<0.3595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3654.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3688.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:50] [ns_1@127.0.0.1:<0.3676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:50] [ns_1@127.0.0.1:<0.3682.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:51] [ns_1@127.0.0.1:<0.3588.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:51] [ns_1@127.0.0.1:<0.3685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:51] [ns_1@127.0.0.1:<0.3682.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:52] [ns_1@127.0.0.1:<0.3605.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:52] [ns_1@127.0.0.1:<0.3696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:52] [ns_1@127.0.0.1:<0.3682.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:53] [ns_1@127.0.0.1:<0.3599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:53] [ns_1@127.0.0.1:<0.3703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:45:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3682.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 
in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:45:54] [ns_1@127.0.0.1:<0.3623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:54] [ns_1@127.0.0.1:<0.3708.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:45:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3688.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:45:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3721.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:45:55] [ns_1@127.0.0.1:<0.3610.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:55] [ns_1@127.0.0.1:<0.3716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:56] [ns_1@127.0.0.1:<0.3636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:56] [ns_1@127.0.0.1:<0.3722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:57] [ns_1@127.0.0.1:<0.3628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:57] [ns_1@127.0.0.1:<0.3730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:58] [ns_1@127.0.0.1:<0.3648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:58] [ns_1@127.0.0.1:<0.3735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:59] [ns_1@127.0.0.1:<0.3641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:45:59] [ns_1@127.0.0.1:<0.3750.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:45:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751555,497877}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38040040}, {processes,10245880}, {processes_used,8620400}, {system,27794160}, {atom,1306681}, {atom_used,1284164}, {binary,616568}, {code,12859877}, {ets,2212512}]}, {system_stats, 
[{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2577}, {memory_data,{4040077312,4012146688,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27516 kB\nBuffers: 58688 kB\nCached: 3531568 kB\nSwapCached: 0 kB\nActive: 305424 kB\nInactive: 3444504 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27516 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 8 kB\nWriteback: 0 kB\nAnonPages: 159684 kB\nMapped: 24868 kB\nSlab: 134444 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 575668 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616325632}, {buffered_memory,60096512}, {free_memory,28176384}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2572233,0}}, {context_switches,{962977,0}}, {garbage_collection,{499278,651477075,0}}, {io,{{input,21100443},{output,33983531}}}, {reductions,{211069544,647974}}, {run_queue,0}, {runtime,{39150,160}}]}]}] [ns_server:info] [2012-03-26 1:46:00] [ns_1@127.0.0.1:<0.3750.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:45:59] [ns_1@127.0.0.1:<0.3741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3721.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3757.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:46:01] [ns_1@127.0.0.1:<0.3750.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:01] [ns_1@127.0.0.1:<0.3739.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:02] [ns_1@127.0.0.1:<0.3663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:02] 
[ns_1@127.0.0.1:<0.3750.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:02] [ns_1@127.0.0.1:<0.3743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 1:46:03] [ns_1@127.0.0.1:<0.3745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3750.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:46:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3757.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3778.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:06] [ns_1@127.0.0.1:<0.3766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:06] [ns_1@127.0.0.1:<0.3674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:07] [ns_1@127.0.0.1:<0.3772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:07] [ns_1@127.0.0.1:<0.3657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:08] [ns_1@127.0.0.1:<0.3787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:08] [ns_1@127.0.0.1:<0.3692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:09] [ns_1@127.0.0.1:<0.3791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:09] [ns_1@127.0.0.1:<0.3667.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:09] 
[ns_1@127.0.0.1:<0.3808.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:10] [ns_1@127.0.0.1:<0.3797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3778.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3814.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:10] [ns_1@127.0.0.1:<0.3705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:10] [ns_1@127.0.0.1:<0.3808.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:11] [ns_1@127.0.0.1:<0.3802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:11] [ns_1@127.0.0.1:<0.3678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:11] [ns_1@127.0.0.1:<0.3808.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:12] [ns_1@127.0.0.1:<0.3817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:12] [ns_1@127.0.0.1:<0.3718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:12] [ns_1@127.0.0.1:<0.3808.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:13] [ns_1@127.0.0.1:<0.3822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:13] [ns_1@127.0.0.1:<0.3698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:46:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3808.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: 
running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:46:14] [ns_1@127.0.0.1:<0.3830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:14] [ns_1@127.0.0.1:<0.3732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:15] [ns_1@127.0.0.1:<0.3835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3814.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3848.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:15] [ns_1@127.0.0.1:<0.3710.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:16] [ns_1@127.0.0.1:<0.3842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:16] [ns_1@127.0.0.1:<0.3770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:17] [ns_1@127.0.0.1:<0.3851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:17] [ns_1@127.0.0.1:<0.3728.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:18] [ns_1@127.0.0.1:<0.3857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:18] [ns_1@127.0.0.1:<0.3789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:19] [ns_1@127.0.0.1:<0.3861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:19] [ns_1@127.0.0.1:<0.3874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:19] [ns_1@127.0.0.1:<0.3764.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3848.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:20] [ns_1@127.0.0.1:<0.3868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:20] [ns_1@127.0.0.1:<0.3874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:20] [ns_1@127.0.0.1:<0.3800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:21] [ns_1@127.0.0.1:<0.3877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:21] [ns_1@127.0.0.1:<0.3874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:21] [ns_1@127.0.0.1:<0.3754.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:22] [ns_1@127.0.0.1:<0.3887.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:22] [ns_1@127.0.0.1:<0.3874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:22] [ns_1@127.0.0.1:<0.3820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:23] [ns_1@127.0.0.1:<0.3895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:46:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3874.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:46:23] [ns_1@127.0.0.1:<0.3785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:24] [ns_1@127.0.0.1:<0.3900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:24] [ns_1@127.0.0.1:<0.3833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:25] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3915.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:25] [ns_1@127.0.0.1:<0.3908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:25] [ns_1@127.0.0.1:<0.3795.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:26] [ns_1@127.0.0.1:<0.3912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:26] [ns_1@127.0.0.1:<0.3845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:27] [ns_1@127.0.0.1:<0.3922.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:27] [ns_1@127.0.0.1:<0.3811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:28] [ns_1@127.0.0.1:<0.3927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:28] [ns_1@127.0.0.1:<0.3859.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:29] [ns_1@127.0.0.1:<0.3933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:29] [ns_1@127.0.0.1:<0.3943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:29] [ns_1@127.0.0.1:<0.3828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3915.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3950.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:30] [ns_1@127.0.0.1:<0.3937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:30] [ns_1@127.0.0.1:<0.3943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:30] [ns_1@127.0.0.1:<0.3870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:31] [ns_1@127.0.0.1:<0.3947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:31] [ns_1@127.0.0.1:<0.3943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:32] 
[ns_1@127.0.0.1:<0.3943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:46:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3943.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:46:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3950.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:46:39] [ns_1@127.0.0.1:<0.3981.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3985.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:46:40] [ns_1@127.0.0.1:<0.3981.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:41] [ns_1@127.0.0.1:<0.3840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:41] [ns_1@127.0.0.1:<0.3981.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:41] [ns_1@127.0.0.1:<0.3855.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:42] [ns_1@127.0.0.1:<0.3956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:42] [ns_1@127.0.0.1:<0.3981.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:42] [ns_1@127.0.0.1:<0.3890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:43] [ns_1@127.0.0.1:<0.3995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:46:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3981.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:46:44] [ns_1@127.0.0.1:<0.3866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:44] [ns_1@127.0.0.1:<0.4000.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:45] [ns_1@127.0.0.1:<0.3902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3985.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4015.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:45] [ns_1@127.0.0.1:<0.4007.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:46] [ns_1@127.0.0.1:<0.3881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:46] [ns_1@127.0.0.1:<0.4012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:47] [ns_1@127.0.0.1:<0.3916.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:47] [ns_1@127.0.0.1:<0.4022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:48] [ns_1@127.0.0.1:<0.3897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:48] [ns_1@127.0.0.1:<0.4026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:49] [ns_1@127.0.0.1:<0.3929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:49] [ns_1@127.0.0.1:<0.4033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:49] [ns_1@127.0.0.1:<0.4043.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:50] [ns_1@127.0.0.1:<0.3910.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4015.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4049.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:50] [ns_1@127.0.0.1:<0.4037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:50] [ns_1@127.0.0.1:<0.4043.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:51] [ns_1@127.0.0.1:<0.3939.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:51] [ns_1@127.0.0.1:<0.4046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:51] [ns_1@127.0.0.1:<0.4043.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:52] [ns_1@127.0.0.1:<0.3924.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:52] [ns_1@127.0.0.1:<0.4057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:52] [ns_1@127.0.0.1:<0.4043.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:53] [ns_1@127.0.0.1:<0.3958.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:53] [ns_1@127.0.0.1:<0.4064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 
30000]}} [error_logger:error] [2012-03-26 1:46:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4043.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:46:54] [ns_1@127.0.0.1:<0.3935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:54] [ns_1@127.0.0.1:<0.4069.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:55] [ns_1@127.0.0.1:<0.3989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:46:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4049.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:46:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4084.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:46:55] [ns_1@127.0.0.1:<0.4077.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:56] [ns_1@127.0.0.1:<0.3951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:56] [ns_1@127.0.0.1:<0.4081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:57] [ns_1@127.0.0.1:<0.4002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:57] [ns_1@127.0.0.1:<0.4091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:58] [ns_1@127.0.0.1:<0.3997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:58] [ns_1@127.0.0.1:<0.4096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:59] [ns_1@127.0.0.1:<0.4018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:46:59] [ns_1@127.0.0.1:<0.4102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:46:59] [ns_1@127.0.0.1:<0.4113.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:46:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751615,523244}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37978056}, {processes,10145288}, {processes_used,8519808}, {system,27832768}, {atom,1306681}, {atom_used,1284164}, {binary,619656}, {code,12859877}, {ets,2241160}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2637}, {memory_data,{4040077312,4011892736,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27276 kB\nBuffers: 58744 kB\nCached: 3531728 kB\nSwapCached: 0 kB\nActive: 305612 kB\nInactive: 3444588 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27276 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 24 kB\nWriteback: 0 kB\nAnonPages: 159720 kB\nMapped: 24868 kB\nSlab: 134400 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577804 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616489472}, {buffered_memory,60153856}, {free_memory,27930624}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2632259,0}}, {context_switches,{974748,0}}, {garbage_collection,{506043,660845349,0}}, {io,{{input,21130969},{output,34369956}}}, {reductions,{213373347,527829}}, {run_queue,0}, {runtime,{39640,120}}]}]}] [stats:error] [2012-03-26 1:47:00] [ns_1@127.0.0.1:<0.4009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4084.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4120.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:00] 
[ns_1@127.0.0.1:<0.4106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:00] [ns_1@127.0.0.1:<0.4113.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:01] [ns_1@127.0.0.1:<0.4028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:01] [ns_1@127.0.0.1:<0.4117.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:01] [ns_1@127.0.0.1:<0.4113.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:02] [ns_1@127.0.0.1:<0.4024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:02] [ns_1@127.0.0.1:<0.4126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:02] [ns_1@127.0.0.1:<0.4113.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:03] [ns_1@127.0.0.1:<0.4039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:03] [ns_1@127.0.0.1:<0.4133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:47:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4113.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:47:04] [ns_1@127.0.0.1:<0.4035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:04] [ns_1@127.0.0.1:<0.4139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4120.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4151.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:05] [ns_1@127.0.0.1:<0.4059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:05] [ns_1@127.0.0.1:<0.4146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:06] [ns_1@127.0.0.1:<0.4053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:06] [ns_1@127.0.0.1:<0.4152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:07] [ns_1@127.0.0.1:<0.4071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:07] [ns_1@127.0.0.1:<0.4162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:08] [ns_1@127.0.0.1:<0.4066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:08] [ns_1@127.0.0.1:<0.4166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:09] [ns_1@127.0.0.1:<0.4087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:09] [ns_1@127.0.0.1:<0.4183.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:09] [ns_1@127.0.0.1:<0.4172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4151.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4189.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:10] [ns_1@127.0.0.1:<0.4079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:10] [ns_1@127.0.0.1:<0.4183.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:10] [ns_1@127.0.0.1:<0.4177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:11] [ns_1@127.0.0.1:<0.4098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:11] [ns_1@127.0.0.1:<0.4183.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:11] [ns_1@127.0.0.1:<0.4190.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:12] [ns_1@127.0.0.1:<0.4093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 1:47:12] [ns_1@127.0.0.1:<0.4183.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:12] [ns_1@127.0.0.1:<0.4197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:13] [ns_1@127.0.0.1:<0.4108.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:47:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4183.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:47:13] [ns_1@127.0.0.1:<0.4205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:14] [ns_1@127.0.0.1:<0.4104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:14] [ns_1@127.0.0.1:<0.4210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4189.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4223.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:15] [ns_1@127.0.0.1:<0.4128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:15] [ns_1@127.0.0.1:<0.4217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:16] [ns_1@127.0.0.1:<0.4123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:16] [ns_1@127.0.0.1:<0.4224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:17] [ns_1@127.0.0.1:<0.4141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:17] 
[ns_1@127.0.0.1:<0.4232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:18] [ns_1@127.0.0.1:<0.4135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:18] [ns_1@127.0.0.1:<0.4236.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:19] [ns_1@127.0.0.1:<0.4159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:19] [ns_1@127.0.0.1:<0.4251.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:20] [ns_1@127.0.0.1:<0.4243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4223.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4257.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:20] [ns_1@127.0.0.1:<0.4148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:20] [ns_1@127.0.0.1:<0.4251.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:21] [ns_1@127.0.0.1:<0.4247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:21] [ns_1@127.0.0.1:<0.4170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:21] [ns_1@127.0.0.1:<0.4251.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:22] [ns_1@127.0.0.1:<0.4258.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:22] [ns_1@127.0.0.1:<0.4164.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:22] [ns_1@127.0.0.1:<0.4251.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:23] [ns_1@127.0.0.1:<0.4267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:23] [ns_1@127.0.0.1:<0.4186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:47:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial 
call: ns_janitor:cleanup/2 pid: <0.4251.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:47:24] [ns_1@127.0.0.1:<0.4274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:24] [ns_1@127.0.0.1:<0.4175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:25] [ns_1@127.0.0.1:<0.4279.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4257.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4292.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:25] [ns_1@127.0.0.1:<0.4203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:26] [ns_1@127.0.0.1:<0.4287.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:26] [ns_1@127.0.0.1:<0.4195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:27] [ns_1@127.0.0.1:<0.4295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:27] [ns_1@127.0.0.1:<0.4215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:28] [ns_1@127.0.0.1:<0.4301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:28] [ns_1@127.0.0.1:<0.4208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:29] [ns_1@127.0.0.1:<0.4306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:29] [ns_1@127.0.0.1:<0.4230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:29] [ns_1@127.0.0.1:<0.4320.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:30] [ns_1@127.0.0.1:<0.4312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:30] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4292.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4327.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:30] [ns_1@127.0.0.1:<0.4220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:30] [ns_1@127.0.0.1:<0.4320.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:31] [ns_1@127.0.0.1:<0.4316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:31] [ns_1@127.0.0.1:<0.4241.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:31] [ns_1@127.0.0.1:<0.4320.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:32] [ns_1@127.0.0.1:<0.4330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:32] [ns_1@127.0.0.1:<0.4234.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:32] [ns_1@127.0.0.1:<0.4320.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:33] [ns_1@127.0.0.1:<0.4335.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:33] [ns_1@127.0.0.1:<0.4254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:47:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4320.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:47:34] [ns_1@127.0.0.1:<0.4342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:34] 
[ns_1@127.0.0.1:<0.4245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:35] [ns_1@127.0.0.1:<0.4348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4327.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4360.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:35] [ns_1@127.0.0.1:<0.4272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:36] [ns_1@127.0.0.1:<0.4355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:36] [ns_1@127.0.0.1:<0.4265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:37] [ns_1@127.0.0.1:<0.4364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:37] [ns_1@127.0.0.1:<0.4285.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:38] [ns_1@127.0.0.1:<0.4371.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:38] [ns_1@127.0.0.1:<0.4277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:39] [ns_1@127.0.0.1:<0.4375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:39] [ns_1@127.0.0.1:<0.4388.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:39] [ns_1@127.0.0.1:<0.4299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4360.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4394.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:40] [ns_1@127.0.0.1:<0.4381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:40] 
[ns_1@127.0.0.1:<0.4388.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:40] [ns_1@127.0.0.1:<0.4289.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:41] [ns_1@127.0.0.1:<0.4391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:41] [ns_1@127.0.0.1:<0.4388.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:41] [ns_1@127.0.0.1:<0.4310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:41] [ns_1@127.0.0.1:<0.4324.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:41] [ns_1@127.0.0.1:<0.4340.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:42] [ns_1@127.0.0.1:<0.4400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:42] [ns_1@127.0.0.1:<0.4388.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:42] [ns_1@127.0.0.1:<0.4304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:43] [ns_1@127.0.0.1:<0.4408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:47:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4388.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:47:43] [ns_1@127.0.0.1:<0.4353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:44] [ns_1@127.0.0.1:<0.4417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:44] [ns_1@127.0.0.1:<0.4314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4394.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[error_logger:info] [2012-03-26 1:47:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4432.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:45] [ns_1@127.0.0.1:<0.4410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:45] [ns_1@127.0.0.1:<0.4369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:46] [ns_1@127.0.0.1:<0.4429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:46] [ns_1@127.0.0.1:<0.4333.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:47] [ns_1@127.0.0.1:<0.4412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:47] [ns_1@127.0.0.1:<0.4379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:48] [ns_1@127.0.0.1:<0.4443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:48] [ns_1@127.0.0.1:<0.4346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:49] [ns_1@127.0.0.1:<0.4424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:49] [ns_1@127.0.0.1:<0.4460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:49] [ns_1@127.0.0.1:<0.4395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4432.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4466.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:50] [ns_1@127.0.0.1:<0.4454.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:50] [ns_1@127.0.0.1:<0.4460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:50] [ns_1@127.0.0.1:<0.4357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:51] [ns_1@127.0.0.1:<0.4439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:51] [ns_1@127.0.0.1:<0.4460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:51] 
[ns_1@127.0.0.1:<0.4414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:52] [ns_1@127.0.0.1:<0.4474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:52] [ns_1@127.0.0.1:<0.4460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:52] [ns_1@127.0.0.1:<0.4373.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:53] [ns_1@127.0.0.1:<0.4450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:47:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4460.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:47:54] [ns_1@127.0.0.1:<0.4426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:54] [ns_1@127.0.0.1:<0.4486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:55] [ns_1@127.0.0.1:<0.4384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:47:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4466.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:47:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4501.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:47:55] [ns_1@127.0.0.1:<0.4463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:56] [ns_1@127.0.0.1:<0.4441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:56] [ns_1@127.0.0.1:<0.4498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:57] 
[ns_1@127.0.0.1:<0.4402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:57] [ns_1@127.0.0.1:<0.4481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:58] [ns_1@127.0.0.1:<0.4452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:58] [ns_1@127.0.0.1:<0.4513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:59] [ns_1@127.0.0.1:<0.4419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:47:59] [ns_1@127.0.0.1:<0.4494.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:47:59] [ns_1@127.0.0.1:<0.4545.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:47:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751675,549261}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38004384}, {processes,10200328}, {processes_used,8574848}, {system,27804056}, {atom,1306681}, {atom_used,1284164}, {binary,612544}, {code,12859877}, {ets,2212344}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2697}, {memory_data,{4040077312,4012146688,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27524 kB\nBuffers: 58804 kB\nCached: 3531876 kB\nSwapCached: 0 kB\nActive: 305756 kB\nInactive: 3444652 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27524 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 4 kB\nWriteback: 0 kB\nAnonPages: 159732 kB\nMapped: 24868 kB\nSlab: 134396 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577804 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616641024}, {buffered_memory,60215296}, {free_memory,28184576}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2692286,0}}, {context_switches,{987795,0}}, {garbage_collection,{512986,672041309,0}}, {io,{{input,21161576},{output,34815170}}}, {reductions,{215945827,664671}}, {run_queue,0}, {runtime,{40230,150}}]}]}] [stats:error] [2012-03-26 1:48:00] [ns_1@127.0.0.1:<0.4467.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4501.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4552.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:00] [ns_1@127.0.0.1:<0.4523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:00] [ns_1@127.0.0.1:<0.4545.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:01] [ns_1@127.0.0.1:<0.4433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:01] [ns_1@127.0.0.1:<0.4508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:01] [ns_1@127.0.0.1:<0.4545.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:02] [ns_1@127.0.0.1:<0.4483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:02] [ns_1@127.0.0.1:<0.4558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:02] [ns_1@127.0.0.1:<0.4545.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:03] [ns_1@127.0.0.1:<0.4445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:03] [ns_1@127.0.0.1:<0.4519.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:48:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4545.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:48:04] [ns_1@127.0.0.1:<0.4496.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:04] [ns_1@127.0.0.1:<0.4571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:05] [ns_1@127.0.0.1:<0.4456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4552.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4585.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:05] [ns_1@127.0.0.1:<0.4549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:06] [ns_1@127.0.0.1:<0.4510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:06] [ns_1@127.0.0.1:<0.4582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:07] [ns_1@127.0.0.1:<0.4476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:07] [ns_1@127.0.0.1:<0.4565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:08] [ns_1@127.0.0.1:<0.4521.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:08] [ns_1@127.0.0.1:<0.4598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:09] [ns_1@127.0.0.1:<0.4488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:09] [ns_1@127.0.0.1:<0.4578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:09] [ns_1@127.0.0.1:<0.4617.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:10] [ns_1@127.0.0.1:<0.4555.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4585.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4623.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 1:48:10] [ns_1@127.0.0.1:<0.4609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:10] [ns_1@127.0.0.1:<0.4617.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:11] [ns_1@127.0.0.1:<0.4502.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:11] [ns_1@127.0.0.1:<0.4594.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:11] [ns_1@127.0.0.1:<0.4617.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:12] [ns_1@127.0.0.1:<0.4567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:12] [ns_1@127.0.0.1:<0.4629.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:12] [ns_1@127.0.0.1:<0.4617.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:13] [ns_1@127.0.0.1:<0.4515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:48:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4617.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:48:13] [ns_1@127.0.0.1:<0.4604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:14] [ns_1@127.0.0.1:<0.4580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:14] [ns_1@127.0.0.1:<0.4642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4623.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4655.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:15] [ns_1@127.0.0.1:<0.4525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:15] [ns_1@127.0.0.1:<0.4620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:16] [ns_1@127.0.0.1:<0.4596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:16] [ns_1@127.0.0.1:<0.4656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:17] [ns_1@127.0.0.1:<0.4560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:17] [ns_1@127.0.0.1:<0.4637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:18] [ns_1@127.0.0.1:<0.4606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:18] [ns_1@127.0.0.1:<0.4668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:19] [ns_1@127.0.0.1:<0.4573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:19] [ns_1@127.0.0.1:<0.4683.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:19] [ns_1@127.0.0.1:<0.4649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4655.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4689.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:20] [ns_1@127.0.0.1:<0.4626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:20] [ns_1@127.0.0.1:<0.4683.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:20] [ns_1@127.0.0.1:<0.4679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:21] [ns_1@127.0.0.1:<0.4589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:21] [ns_1@127.0.0.1:<0.4683.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:21] [ns_1@127.0.0.1:<0.4664.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:22] [ns_1@127.0.0.1:<0.4639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:22] [ns_1@127.0.0.1:<0.4683.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:22] [ns_1@127.0.0.1:<0.4699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:23] [ns_1@127.0.0.1:<0.4600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:48:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4683.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:48:23] [ns_1@127.0.0.1:<0.4675.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:24] [ns_1@127.0.0.1:<0.4651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:24] [ns_1@127.0.0.1:<0.4711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4689.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4724.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:25] [ns_1@127.0.0.1:<0.4611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:25] [ns_1@127.0.0.1:<0.4690.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:26] [ns_1@127.0.0.1:<0.4666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:26] [ns_1@127.0.0.1:<0.4725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:27] [ns_1@127.0.0.1:<0.4631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:48:27] [ns_1@127.0.0.1:<0.4706.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:28] [ns_1@127.0.0.1:<0.4677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:28] [ns_1@127.0.0.1:<0.4738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:29] [ns_1@127.0.0.1:<0.4647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:29] [ns_1@127.0.0.1:<0.4752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:30] [ns_1@127.0.0.1:<0.4719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4724.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4759.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:30] [ns_1@127.0.0.1:<0.4697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:30] [ns_1@127.0.0.1:<0.4752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:31] [ns_1@127.0.0.1:<0.4748.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:31] [ns_1@127.0.0.1:<0.4662.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:31] [ns_1@127.0.0.1:<0.4752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:32] [ns_1@127.0.0.1:<0.4733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:32] [ns_1@127.0.0.1:<0.4709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:32] [ns_1@127.0.0.1:<0.4752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:33] [ns_1@127.0.0.1:<0.4767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:33] [ns_1@127.0.0.1:<0.4673.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:48:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4752.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:48:34] [ns_1@127.0.0.1:<0.4744.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:34] [ns_1@127.0.0.1:<0.4721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:35] [ns_1@127.0.0.1:<0.4780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4759.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4792.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:35] [ns_1@127.0.0.1:<0.4686.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:36] [ns_1@127.0.0.1:<0.4760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:36] [ns_1@127.0.0.1:<0.4736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:37] [ns_1@127.0.0.1:<0.4796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:37] [ns_1@127.0.0.1:<0.4704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:38] [ns_1@127.0.0.1:<0.4774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:38] [ns_1@127.0.0.1:<0.4746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:39] [ns_1@127.0.0.1:<0.4807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:39] [ns_1@127.0.0.1:<0.4717.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:39] [ns_1@127.0.0.1:<0.4822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:40] [ns_1@127.0.0.1:<0.4787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
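Note: every janitor crash report in this stretch ends with the same reason: a gen_server:call/3 to the registered name 'ns_memcached-default' on 'ns_1@127.0.0.1' while no process is running under that name. For reference, a minimal Erlang-shell sketch of that failure mode, assuming only standard OTP behaviour (the target name, request and timeout are copied from the crash reports above, not from ns_server source):

    %% With no process registered as 'ns_memcached-default', the call itself
    %% exits the caller with {noproc, ...}; `catch` in the shell shows the reason.
    catch gen_server:call({'ns_memcached-default', node()},
                          list_vbuckets_prevstate, 30000).
    %% => {'EXIT',{noproc,{gen_server,call,[...]}}}

ns_janitor:cleanup/2 makes this call without catching the exit, so each run dies with that reason, ns_orchestrator logs "Janitor run exited", and the janitor cycle repeats roughly every ten seconds for as long as the memcached-facing process stays down.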
[error_logger:error] [2012-03-26 1:48:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4792.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4828.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:40] [ns_1@127.0.0.1:<0.4765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:40] [ns_1@127.0.0.1:<0.4822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:41] [ns_1@127.0.0.1:<0.4818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:41] [ns_1@127.0.0.1:<0.4731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:41] [ns_1@127.0.0.1:<0.4822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:42] [ns_1@127.0.0.1:<0.4803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:42] [ns_1@127.0.0.1:<0.4813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:42] [ns_1@127.0.0.1:<0.4831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:42] [ns_1@127.0.0.1:<0.4778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:42] [ns_1@127.0.0.1:<0.4822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:43] [ns_1@127.0.0.1:<0.4836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:43] [ns_1@127.0.0.1:<0.4742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:48:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4822.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:48:44] [ns_1@127.0.0.1:<0.4848.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:44] [ns_1@127.0.0.1:<0.4789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:45] [ns_1@127.0.0.1:<0.4853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4828.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4866.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:45] [ns_1@127.0.0.1:<0.4756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:46] [ns_1@127.0.0.1:<0.4860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:46] [ns_1@127.0.0.1:<0.4805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:47] [ns_1@127.0.0.1:<0.4869.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:47] [ns_1@127.0.0.1:<0.4772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:48] [ns_1@127.0.0.1:<0.4875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:48] [ns_1@127.0.0.1:<0.4816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:49] [ns_1@127.0.0.1:<0.4881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:49] [ns_1@127.0.0.1:<0.4892.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:49] [ns_1@127.0.0.1:<0.4785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4866.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:48:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4898.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:50] [ns_1@127.0.0.1:<0.4886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:50] [ns_1@127.0.0.1:<0.4892.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:50] [ns_1@127.0.0.1:<0.4834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:51] [ns_1@127.0.0.1:<0.4895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:51] [ns_1@127.0.0.1:<0.4892.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:51] [ns_1@127.0.0.1:<0.4801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:52] [ns_1@127.0.0.1:<0.4905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:52] [ns_1@127.0.0.1:<0.4892.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:52] [ns_1@127.0.0.1:<0.4851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:53] [ns_1@127.0.0.1:<0.4913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:48:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4892.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:48:53] [ns_1@127.0.0.1:<0.4811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:54] [ns_1@127.0.0.1:<0.4918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:54] [ns_1@127.0.0.1:<0.4863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:48:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4898.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 1:48:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4933.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:48:55] [ns_1@127.0.0.1:<0.4926.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:55] [ns_1@127.0.0.1:<0.4825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:56] [ns_1@127.0.0.1:<0.4930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:56] [ns_1@127.0.0.1:<0.4877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:57] [ns_1@127.0.0.1:<0.4940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:57] [ns_1@127.0.0.1:<0.4842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:58] [ns_1@127.0.0.1:<0.4945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:58] [ns_1@127.0.0.1:<0.4888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:48:59] [ns_1@127.0.0.1:<0.4951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:48:59] [ns_1@127.0.0.1:<0.4962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:48:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751735,576342}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38216952}, {processes,10373608}, {processes_used,8748128}, {system,27843344}, {atom,1306681}, {atom_used,1284164}, {binary,614040}, {code,12859877}, {ets,2243528}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2757}, {memory_data,{4040077312,4011892736,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27276 kB\nBuffers: 58944 kB\nCached: 3532020 kB\nSwapCached: 0 kB\nActive: 305872 kB\nInactive: 3444864 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27276 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 159748 kB\nMapped: 24868 kB\nSlab: 134416 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 
kB\nCommitted_AS: 577804 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616788480}, {buffered_memory,60358656}, {free_memory,27930624}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2752313,0}}, {context_switches,{1001137,0}}, {garbage_collection,{520135,683281083,0}}, {io,{{input,21438191},{output,35482125}}}, {reductions,{218571007,669516}}, {run_queue,0}, {runtime,{40820,150}}]}]}] [stats:error] [2012-03-26 1:48:59] [ns_1@127.0.0.1:<0.4844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4933.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:00] [ns_1@127.0.0.1:<0.4955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:00] [ns_1@127.0.0.1:<0.4962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:00] [ns_1@127.0.0.1:<0.4908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:01] [ns_1@127.0.0.1:<0.4966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:01] [ns_1@127.0.0.1:<0.4962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:01] [ns_1@127.0.0.1:<0.4846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:02] [ns_1@127.0.0.1:<0.4975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:02] [ns_1@127.0.0.1:<0.4962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:02] [ns_1@127.0.0.1:<0.4920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:03] [ns_1@127.0.0.1:<0.4982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:49:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.4962.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:49:04] [ns_1@127.0.0.1:<0.4858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:04] [ns_1@127.0.0.1:<0.4988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:05] [ns_1@127.0.0.1:<0.4934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.4969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5002.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:05] [ns_1@127.0.0.1:<0.4995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:06] [ns_1@127.0.0.1:<0.4873.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:06] [ns_1@127.0.0.1:<0.4999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:07] [ns_1@127.0.0.1:<0.4947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:07] [ns_1@127.0.0.1:<0.5011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:08] [ns_1@127.0.0.1:<0.4884.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:08] [ns_1@127.0.0.1:<0.5015.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:09] [ns_1@127.0.0.1:<0.4957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:09] [ns_1@127.0.0.1:<0.5021.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:09] [ns_1@127.0.0.1:<0.5034.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:10] [ns_1@127.0.0.1:<0.4899.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5002.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5040.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:10] [ns_1@127.0.0.1:<0.5026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:10] [ns_1@127.0.0.1:<0.5034.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:11] [ns_1@127.0.0.1:<0.4977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:11] [ns_1@127.0.0.1:<0.5037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:11] [ns_1@127.0.0.1:<0.5034.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:12] [ns_1@127.0.0.1:<0.4915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:12] [ns_1@127.0.0.1:<0.5046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:12] [ns_1@127.0.0.1:<0.5034.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:13] [ns_1@127.0.0.1:<0.4990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:13] [ns_1@127.0.0.1:<0.5054.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:49:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5034.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:49:14] [ns_1@127.0.0.1:<0.4928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:49:14] [ns_1@127.0.0.1:<0.5059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:15] [ns_1@127.0.0.1:<0.5003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5040.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5074.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:15] [ns_1@127.0.0.1:<0.5066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:16] [ns_1@127.0.0.1:<0.4942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:16] [ns_1@127.0.0.1:<0.5071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:17] [ns_1@127.0.0.1:<0.5017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:17] [ns_1@127.0.0.1:<0.5081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:18] [ns_1@127.0.0.1:<0.4953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:18] [ns_1@127.0.0.1:<0.5085.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:19] [ns_1@127.0.0.1:<0.5028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:19] [ns_1@127.0.0.1:<0.5092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:19] [ns_1@127.0.0.1:<0.5102.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:20] [ns_1@127.0.0.1:<0.4970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5074.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5108.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
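Note: the repeating menelaus_sup SUPERVISOR REPORT / PROGRESS REPORT pairs are the same outage seen from the supervisor's side: hot_keys_keeper dies on its 'ns_memcached-default' topkeys call, and because it is a permanent child the supervisor immediately starts a replacement. A child specification with the same shape as the Offender/started entries, assembled from the log fields rather than the actual menelaus_sup source, would look roughly like:

    %% Sketch only; field values copied from the report, module list assumed.
    {hot_keys_keeper,                      %% child id      ({name,hot_keys_keeper})
     {hot_keys_keeper, start_link, []},    %% start MFA     ({mfargs,...})
     permanent,                            %% restart type  ({restart_type,permanent})
     5000,                                 %% shutdown (ms) ({shutdown,5000})
     worker,                               %% child type    ({child_type,worker})
     [hot_keys_keeper]}.                   %% callback modules (assumption)

With a permanent restart type the supervisor keeps cycling the child until the underlying 'ns_memcached-default' process is available again or its restart intensity is exceeded, which is why the report pair recurs every few seconds here.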
[stats:error] [2012-03-26 1:49:20] [ns_1@127.0.0.1:<0.5096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:20] [ns_1@127.0.0.1:<0.5102.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:21] [ns_1@127.0.0.1:<0.5048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:21] [ns_1@127.0.0.1:<0.5105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:21] [ns_1@127.0.0.1:<0.5102.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:22] [ns_1@127.0.0.1:<0.4984.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:22] [ns_1@127.0.0.1:<0.5116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:22] [ns_1@127.0.0.1:<0.5102.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:23] [ns_1@127.0.0.1:<0.5061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:49:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5102.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:49:23] [ns_1@127.0.0.1:<0.5123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:24] [ns_1@127.0.0.1:<0.4997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:24] [ns_1@127.0.0.1:<0.5128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5108.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5141.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:25] [ns_1@127.0.0.1:<0.5077.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:25] [ns_1@127.0.0.1:<0.5136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:26] [ns_1@127.0.0.1:<0.5013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:26] [ns_1@127.0.0.1:<0.5142.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:27] [ns_1@127.0.0.1:<0.5087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:27] [ns_1@127.0.0.1:<0.5150.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:28] [ns_1@127.0.0.1:<0.5023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:28] [ns_1@127.0.0.1:<0.5155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:29] [ns_1@127.0.0.1:<0.5098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:29] [ns_1@127.0.0.1:<0.5169.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:29] [ns_1@127.0.0.1:<0.5161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5141.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5176.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:30] [ns_1@127.0.0.1:<0.5043.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:30] [ns_1@127.0.0.1:<0.5169.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:30] [ns_1@127.0.0.1:<0.5165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:31] [ns_1@127.0.0.1:<0.5118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:31] [ns_1@127.0.0.1:<0.5169.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:31] [ns_1@127.0.0.1:<0.5177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:32] [ns_1@127.0.0.1:<0.5056.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 1:49:32] [ns_1@127.0.0.1:<0.5169.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:32] [ns_1@127.0.0.1:<0.5184.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:33] [ns_1@127.0.0.1:<0.5133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:49:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5169.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:49:33] [ns_1@127.0.0.1:<0.5191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:34] [ns_1@127.0.0.1:<0.5068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:34] [ns_1@127.0.0.1:<0.5197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5176.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5209.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:35] [ns_1@127.0.0.1:<0.5148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:35] [ns_1@127.0.0.1:<0.5204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:36] [ns_1@127.0.0.1:<0.5083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:36] [ns_1@127.0.0.1:<0.5210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:37] [ns_1@127.0.0.1:<0.5159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:37] 
[ns_1@127.0.0.1:<0.5220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:38] [ns_1@127.0.0.1:<0.5094.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:38] [ns_1@127.0.0.1:<0.5224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:39] [ns_1@127.0.0.1:<0.5173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:39] [ns_1@127.0.0.1:<0.5239.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:40] [ns_1@127.0.0.1:<0.5230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5209.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5245.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:40] [ns_1@127.0.0.1:<0.5112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:40] [ns_1@127.0.0.1:<0.5239.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:41] [ns_1@127.0.0.1:<0.5235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:41] [ns_1@127.0.0.1:<0.5189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:41] [ns_1@127.0.0.1:<0.5239.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:42] [ns_1@127.0.0.1:<0.5246.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:42] [ns_1@127.0.0.1:<0.5125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:42] [ns_1@127.0.0.1:<0.5138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:42] [ns_1@127.0.0.1:<0.5153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:42] [ns_1@127.0.0.1:<0.5239.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:43] [ns_1@127.0.0.1:<0.5253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:49:43] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5239.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:49:44] [ns_1@127.0.0.1:<0.5261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:44] [ns_1@127.0.0.1:<0.5163.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:45] [ns_1@127.0.0.1:<0.5202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5245.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5281.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:45] [ns_1@127.0.0.1:<0.5218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:46] [ns_1@127.0.0.1:<0.5263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:46] [ns_1@127.0.0.1:<0.5182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:47] [ns_1@127.0.0.1:<0.5284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:47] [ns_1@127.0.0.1:<0.5228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:48] [ns_1@127.0.0.1:<0.5265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:48] [ns_1@127.0.0.1:<0.5195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:49] [ns_1@127.0.0.1:<0.5294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:49] [ns_1@127.0.0.1:<0.5242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:49] [ns_1@127.0.0.1:<0.5309.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:50] 
[ns_1@127.0.0.1:<0.5275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5281.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5315.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:50] [ns_1@127.0.0.1:<0.5206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:50] [ns_1@127.0.0.1:<0.5309.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:51] [ns_1@127.0.0.1:<0.5305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:51] [ns_1@127.0.0.1:<0.5259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:51] [ns_1@127.0.0.1:<0.5309.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:52] [ns_1@127.0.0.1:<0.5290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:52] [ns_1@127.0.0.1:<0.5222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:52] [ns_1@127.0.0.1:<0.5309.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:53] [ns_1@127.0.0.1:<0.5325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:53] [ns_1@127.0.0.1:<0.5273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:49:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5309.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:49:54] 
[ns_1@127.0.0.1:<0.5301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:54] [ns_1@127.0.0.1:<0.5233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:55] [ns_1@127.0.0.1:<0.5337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:49:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5315.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:49:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5350.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:49:55] [ns_1@127.0.0.1:<0.5288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:56] [ns_1@127.0.0.1:<0.5319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:56] [ns_1@127.0.0.1:<0.5251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:57] [ns_1@127.0.0.1:<0.5353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:57] [ns_1@127.0.0.1:<0.5299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:58] [ns_1@127.0.0.1:<0.5332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:58] [ns_1@127.0.0.1:<0.5268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:49:59] [ns_1@127.0.0.1:<0.5366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:49:59] [ns_1@127.0.0.1:<0.5377.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:49:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751795,605377}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37960432}, {processes,10141352}, {processes_used,8515872}, {system,27819080}, {atom,1306681}, {atom_used,1284164}, {binary,613360}, {code,12859877}, {ets,2214528}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, 
{ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2817}, {memory_data,{4040077312,4012400640,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26756 kB\nBuffers: 59012 kB\nCached: 3532184 kB\nSwapCached: 0 kB\nActive: 306068 kB\nInactive: 3444900 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26756 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 159760 kB\nMapped: 24868 kB\nSlab: 134424 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577804 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616956416}, {buffered_memory,60428288}, {free_memory,27398144}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2812342,0}}, {context_switches,{1014174,0}}, {garbage_collection,{527483,694522456,0}}, {io,{{input,21468789},{output,35921972}}}, {reductions,{221196296,652331}}, {run_queue,0}, {runtime,{41420,170}}]}]}] [stats:error] [2012-03-26 1:49:59] [ns_1@127.0.0.1:<0.5312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5350.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5384.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:00] [ns_1@127.0.0.1:<0.5345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:00] [ns_1@127.0.0.1:<0.5377.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:00] [ns_1@127.0.0.1:<0.5278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:01] [ns_1@127.0.0.1:<0.5380.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:01] [ns_1@127.0.0.1:<0.5377.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:01] [ns_1@127.0.0.1:<0.5330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:02] [ns_1@127.0.0.1:<0.5359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:02] [ns_1@127.0.0.1:<0.5377.1>:ns_janitor:wait_for_memcached:278] Waiting 
for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:02] [ns_1@127.0.0.1:<0.5292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:03] [ns_1@127.0.0.1:<0.5397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:50:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5377.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:50:03] [ns_1@127.0.0.1:<0.5343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:04] [ns_1@127.0.0.1:<0.5370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:04] [ns_1@127.0.0.1:<0.5303.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5384.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5417.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:05] [ns_1@127.0.0.1:<0.5410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:05] [ns_1@127.0.0.1:<0.5357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:06] [ns_1@127.0.0.1:<0.5390.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:06] [ns_1@127.0.0.1:<0.5323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:07] [ns_1@127.0.0.1:<0.5426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:07] [ns_1@127.0.0.1:<0.5368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 1:50:08] [ns_1@127.0.0.1:<0.5403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:08] [ns_1@127.0.0.1:<0.5335.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:09] [ns_1@127.0.0.1:<0.5436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:09] [ns_1@127.0.0.1:<0.5449.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:09] [ns_1@127.0.0.1:<0.5385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5417.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5455.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:10] [ns_1@127.0.0.1:<0.5414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:10] [ns_1@127.0.0.1:<0.5449.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:10] [ns_1@127.0.0.1:<0.5347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:11] [ns_1@127.0.0.1:<0.5452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:11] [ns_1@127.0.0.1:<0.5449.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:11] [ns_1@127.0.0.1:<0.5399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:12] [ns_1@127.0.0.1:<0.5430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:12] [ns_1@127.0.0.1:<0.5449.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:13] [ns_1@127.0.0.1:<0.5362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:13] [ns_1@127.0.0.1:<0.5469.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:50:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5449.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:50:14] [ns_1@127.0.0.1:<0.5412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:14] [ns_1@127.0.0.1:<0.5441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:15] [ns_1@127.0.0.1:<0.5372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5455.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5489.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:15] [ns_1@127.0.0.1:<0.5481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:16] [ns_1@127.0.0.1:<0.5428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:16] [ns_1@127.0.0.1:<0.5461.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:17] [ns_1@127.0.0.1:<0.5392.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:17] [ns_1@127.0.0.1:<0.5496.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:18] [ns_1@127.0.0.1:<0.5438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:18] [ns_1@127.0.0.1:<0.5474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:19] [ns_1@127.0.0.1:<0.5405.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:19] [ns_1@127.0.0.1:<0.5507.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:19] [ns_1@127.0.0.1:<0.5517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:20] [ns_1@127.0.0.1:<0.5456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5489.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5523.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:20] [ns_1@127.0.0.1:<0.5486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:20] [ns_1@127.0.0.1:<0.5517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:21] [ns_1@127.0.0.1:<0.5418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:21] [ns_1@127.0.0.1:<0.5520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:21] [ns_1@127.0.0.1:<0.5517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:22] [ns_1@127.0.0.1:<0.5471.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:22] [ns_1@127.0.0.1:<0.5500.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:22] [ns_1@127.0.0.1:<0.5517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:23] [ns_1@127.0.0.1:<0.5432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:23] [ns_1@127.0.0.1:<0.5538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:50:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5517.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:50:24] [ns_1@127.0.0.1:<0.5483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:24] [ns_1@127.0.0.1:<0.5511.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:25] 
[ns_1@127.0.0.1:<0.5443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5523.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5558.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:25] [ns_1@127.0.0.1:<0.5551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:26] [ns_1@127.0.0.1:<0.5498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:26] [ns_1@127.0.0.1:<0.5531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:27] [ns_1@127.0.0.1:<0.5463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:27] [ns_1@127.0.0.1:<0.5565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:28] [ns_1@127.0.0.1:<0.5509.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:28] [ns_1@127.0.0.1:<0.5543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:29] [ns_1@127.0.0.1:<0.5476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:29] [ns_1@127.0.0.1:<0.5576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:29] [ns_1@127.0.0.1:<0.5586.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:30] [ns_1@127.0.0.1:<0.5524.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5558.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5593.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:30] [ns_1@127.0.0.1:<0.5555.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:30] 
[ns_1@127.0.0.1:<0.5586.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:31] [ns_1@127.0.0.1:<0.5490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:31] [ns_1@127.0.0.1:<0.5590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:31] [ns_1@127.0.0.1:<0.5586.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:32] [ns_1@127.0.0.1:<0.5540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:32] [ns_1@127.0.0.1:<0.5570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:32] [ns_1@127.0.0.1:<0.5586.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:33] [ns_1@127.0.0.1:<0.5502.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:50:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5586.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:50:33] [ns_1@127.0.0.1:<0.5606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:34] [ns_1@127.0.0.1:<0.5553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:34] [ns_1@127.0.0.1:<0.5580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:35] [ns_1@127.0.0.1:<0.5513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5593.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5626.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:35] [ns_1@127.0.0.1:<0.5619.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:36] [ns_1@127.0.0.1:<0.5567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:36] [ns_1@127.0.0.1:<0.5599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:37] [ns_1@127.0.0.1:<0.5533.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:37] [ns_1@127.0.0.1:<0.5635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:38] [ns_1@127.0.0.1:<0.5578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:38] [ns_1@127.0.0.1:<0.5612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:39] [ns_1@127.0.0.1:<0.5545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:39] [ns_1@127.0.0.1:<0.5654.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:39] [ns_1@127.0.0.1:<0.5645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5626.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5660.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:40] [ns_1@127.0.0.1:<0.5596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:40] [ns_1@127.0.0.1:<0.5654.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:40] [ns_1@127.0.0.1:<0.5623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:41] [ns_1@127.0.0.1:<0.5561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:41] [ns_1@127.0.0.1:<0.5654.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:41] [ns_1@127.0.0.1:<0.5661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:42] [ns_1@127.0.0.1:<0.5608.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:42] [ns_1@127.0.0.1:<0.5654.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:50:42] [ns_1@127.0.0.1:<0.5639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:42] [ns_1@127.0.0.1:<0.5650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:42] [ns_1@127.0.0.1:<0.5668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:42] [ns_1@127.0.0.1:<0.5572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:42] [ns_1@127.0.0.1:<0.5582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:43] [ns_1@127.0.0.1:<0.5601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:50:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5654.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:50:43] [ns_1@127.0.0.1:<0.5676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:44] [ns_1@127.0.0.1:<0.5621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:44] [ns_1@127.0.0.1:<0.5689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5660.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5702.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:45] [ns_1@127.0.0.1:<0.5617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:45] [ns_1@127.0.0.1:<0.5696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:46] 
[ns_1@127.0.0.1:<0.5637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:46] [ns_1@127.0.0.1:<0.5703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:47] [ns_1@127.0.0.1:<0.5632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:48] [ns_1@127.0.0.1:<0.5711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:48] [ns_1@127.0.0.1:<0.5648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:49] [ns_1@127.0.0.1:<0.5715.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:49] [ns_1@127.0.0.1:<0.5643.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:49] [ns_1@127.0.0.1:<0.5730.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:50] [ns_1@127.0.0.1:<0.5722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5702.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5736.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:50] [ns_1@127.0.0.1:<0.5666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:50] [ns_1@127.0.0.1:<0.5730.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:51] [ns_1@127.0.0.1:<0.5726.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:51] [ns_1@127.0.0.1:<0.5657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:51] [ns_1@127.0.0.1:<0.5730.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:52] [ns_1@127.0.0.1:<0.5737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:52] [ns_1@127.0.0.1:<0.5679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:52] [ns_1@127.0.0.1:<0.5730.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:53] [ns_1@127.0.0.1:<0.5746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:53] [ns_1@127.0.0.1:<0.5674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:50:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:50:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5730.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:50:54] [ns_1@127.0.0.1:<0.5753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:54] [ns_1@127.0.0.1:<0.5681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:55] [ns_1@127.0.0.1:<0.5758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:50:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5736.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:50:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5771.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:50:55] [ns_1@127.0.0.1:<0.5694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:56] [ns_1@127.0.0.1:<0.5766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:56] [ns_1@127.0.0.1:<0.5683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:57] [ns_1@127.0.0.1:<0.5772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:57] [ns_1@127.0.0.1:<0.5709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:58] [ns_1@127.0.0.1:<0.5780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:58] [ns_1@127.0.0.1:<0.5685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:59] [ns_1@127.0.0.1:<0.5785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:50:59] 
[ns_1@127.0.0.1:<0.5720.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:50:59] [ns_1@127.0.0.1:<0.5805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:50:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751855,632289}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38032944}, {processes,10183648}, {processes_used,8558168}, {system,27849296}, {atom,1306681}, {atom_used,1284164}, {binary,611624}, {code,12859877}, {ets,2243248}]}, {system_stats, [{cpu_utilization_rate,25.18703241895262}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2877}, {memory_data,{4040077312,4012544000,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26872 kB\nBuffers: 59092 kB\nCached: 3532344 kB\nSwapCached: 0 kB\nActive: 306228 kB\nInactive: 3445012 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26872 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 68 kB\nWriteback: 0 kB\nAnonPages: 159788 kB\nMapped: 24868 kB\nSlab: 134440 kB\nPageTables: 6460 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577804 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617120256}, {buffered_memory,60510208}, {free_memory,27516928}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2872369,0}}, {context_switches,{1027321,0}}, {garbage_collection,{534433,705892294,0}}, {io,{{input,21499396},{output,36367838}}}, {reductions,{223789658,694940}}, {run_queue,0}, {runtime,{42030,160}}]}]}] [stats:error] [2012-03-26 1:51:00] [ns_1@127.0.0.1:<0.5791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5771.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,menelaus_sup} started: [{pid,<0.5812.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:51:00] [ns_1@127.0.0.1:<0.5805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:00] [ns_1@127.0.0.1:<0.5687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:01] [ns_1@127.0.0.1:<0.5800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:01] [ns_1@127.0.0.1:<0.5805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:01] [ns_1@127.0.0.1:<0.5733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:02] [ns_1@127.0.0.1:<0.5815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:02] [ns_1@127.0.0.1:<0.5805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:02] [ns_1@127.0.0.1:<0.5699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:03] [ns_1@127.0.0.1:<0.5831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:51:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5805.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:51:03] [ns_1@127.0.0.1:<0.5751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:04] [ns_1@127.0.0.1:<0.5837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:04] [ns_1@127.0.0.1:<0.5713.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5812.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:05] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5851.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:05] [ns_1@127.0.0.1:<0.5844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:05] [ns_1@127.0.0.1:<0.5764.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:06] [ns_1@127.0.0.1:<0.5848.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:06] [ns_1@127.0.0.1:<0.5724.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:07] [ns_1@127.0.0.1:<0.5860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:07] [ns_1@127.0.0.1:<0.5778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:08] [ns_1@127.0.0.1:<0.5864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:08] [ns_1@127.0.0.1:<0.5744.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:09] [ns_1@127.0.0.1:<0.5870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:09] [ns_1@127.0.0.1:<0.5883.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:09] [ns_1@127.0.0.1:<0.5789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5851.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5889.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:10] [ns_1@127.0.0.1:<0.5875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:10] [ns_1@127.0.0.1:<0.5883.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:10] [ns_1@127.0.0.1:<0.5756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:11] [ns_1@127.0.0.1:<0.5886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:11] [ns_1@127.0.0.1:<0.5883.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:11] 
[ns_1@127.0.0.1:<0.5809.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:12] [ns_1@127.0.0.1:<0.5895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:12] [ns_1@127.0.0.1:<0.5883.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:13] [ns_1@127.0.0.1:<0.5768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:13] [ns_1@127.0.0.1:<0.5903.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:51:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5883.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:51:14] [ns_1@127.0.0.1:<0.5833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:14] [ns_1@127.0.0.1:<0.5908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:15] [ns_1@127.0.0.1:<0.5783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5889.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5923.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:15] [ns_1@127.0.0.1:<0.5915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:16] [ns_1@127.0.0.1:<0.5846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:16] [ns_1@127.0.0.1:<0.5920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:17] 
[ns_1@127.0.0.1:<0.5793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:17] [ns_1@127.0.0.1:<0.5930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:18] [ns_1@127.0.0.1:<0.5862.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:18] [ns_1@127.0.0.1:<0.5934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:19] [ns_1@127.0.0.1:<0.5826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:19] [ns_1@127.0.0.1:<0.5941.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:19] [ns_1@127.0.0.1:<0.5951.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:20] [ns_1@127.0.0.1:<0.5872.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5923.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5957.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:20] [ns_1@127.0.0.1:<0.5945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:20] [ns_1@127.0.0.1:<0.5951.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:21] [ns_1@127.0.0.1:<0.5839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:21] [ns_1@127.0.0.1:<0.5954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:21] [ns_1@127.0.0.1:<0.5951.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:22] [ns_1@127.0.0.1:<0.5890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:22] [ns_1@127.0.0.1:<0.5965.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:22] [ns_1@127.0.0.1:<0.5951.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:23] [ns_1@127.0.0.1:<0.5852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:23] [ns_1@127.0.0.1:<0.5972.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, 
{gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:51:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.5951.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:51:24] [ns_1@127.0.0.1:<0.5905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:24] [ns_1@127.0.0.1:<0.5977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:25] [ns_1@127.0.0.1:<0.5866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5957.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.5992.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:25] [ns_1@127.0.0.1:<0.5985.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:26] [ns_1@127.0.0.1:<0.5917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:26] [ns_1@127.0.0.1:<0.5989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:27] [ns_1@127.0.0.1:<0.5877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:27] [ns_1@127.0.0.1:<0.5999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:28] [ns_1@127.0.0.1:<0.5932.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:28] [ns_1@127.0.0.1:<0.6004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:29] [ns_1@127.0.0.1:<0.5897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:29] [ns_1@127.0.0.1:<0.6010.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:29] 
[ns_1@127.0.0.1:<0.6020.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:30] [ns_1@127.0.0.1:<0.5943.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.5992.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6027.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:30] [ns_1@127.0.0.1:<0.6014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:30] [ns_1@127.0.0.1:<0.6020.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:31] [ns_1@127.0.0.1:<0.5910.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:31] [ns_1@127.0.0.1:<0.6024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:31] [ns_1@127.0.0.1:<0.6020.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:32] [ns_1@127.0.0.1:<0.5958.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:32] [ns_1@127.0.0.1:<0.6020.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:32] [ns_1@127.0.0.1:<0.6033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:33] [ns_1@127.0.0.1:<0.5924.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:51:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6020.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:51:33] 
[ns_1@127.0.0.1:<0.6040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:34] [ns_1@127.0.0.1:<0.5974.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:34] [ns_1@127.0.0.1:<0.6046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6027.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6058.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:35] [ns_1@127.0.0.1:<0.5936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:35] [ns_1@127.0.0.1:<0.6053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:36] [ns_1@127.0.0.1:<0.5987.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:36] [ns_1@127.0.0.1:<0.6059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:37] [ns_1@127.0.0.1:<0.5947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:37] [ns_1@127.0.0.1:<0.6069.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:38] [ns_1@127.0.0.1:<0.6001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:38] [ns_1@127.0.0.1:<0.6073.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:39] [ns_1@127.0.0.1:<0.5967.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:39] [ns_1@127.0.0.1:<0.6088.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:39] [ns_1@127.0.0.1:<0.6079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6058.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6094.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:40] [ns_1@127.0.0.1:<0.6012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:40] [ns_1@127.0.0.1:<0.6088.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:40] [ns_1@127.0.0.1:<0.6084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:41] [ns_1@127.0.0.1:<0.5979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:41] [ns_1@127.0.0.1:<0.6088.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:41] [ns_1@127.0.0.1:<0.6095.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:42] [ns_1@127.0.0.1:<0.6030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:42] [ns_1@127.0.0.1:<0.6088.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:42] [ns_1@127.0.0.1:<0.6102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:43] [ns_1@127.0.0.1:<0.5995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:43] [ns_1@127.0.0.1:<0.6035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:43] [ns_1@127.0.0.1:<0.6051.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 1:51:43] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:51:43] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:51:43] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:51:43] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:51:43] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 1:51:48] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
[ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:<0.6067.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:<0.6082.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:<0.6154.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 1:51:48] [ns_1@127.0.0.1:<0.6100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:51:48] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:51:50: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] 
[2012-03-26 1:51:49] [ns_1@127.0.0.1:<0.6115.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:49] [ns_1@127.0.0.1:<0.6154.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:49] [ns_1@127.0.0.1:<0.6077.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.6154.1>} [stats:error] [2012-03-26 1:51:50] [ns_1@127.0.0.1:<0.6110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6094.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6174.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:51:50] [ns_1@127.0.0.1:<0.6154.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:50] [ns_1@127.0.0.1:<0.6113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:51] [ns_1@127.0.0.1:<0.6117.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:51] [ns_1@127.0.0.1:<0.6154.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:51] [ns_1@127.0.0.1:<0.6091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:52] [ns_1@127.0.0.1:<0.6147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:52] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:51:52] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6154.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] 
[2012-03-26 1:51:52] [ns_1@127.0.0.1:<0.6164.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:53] [ns_1@127.0.0.1:<0.6120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:53] [ns_1@127.0.0.1:<0.6108.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:54] [ns_1@127.0.0.1:<0.6148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:54] [ns_1@127.0.0.1:<0.6182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:55] [ns_1@127.0.0.1:<0.6123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:51:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6174.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:51:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6208.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:51:55] [ns_1@127.0.0.1:<0.6171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:56] [ns_1@127.0.0.1:<0.6149.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:56] [ns_1@127.0.0.1:<0.6194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:57] [ns_1@127.0.0.1:<0.6125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:57] [ns_1@127.0.0.1:<0.6189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:58] [ns_1@127.0.0.1:<0.6150.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:58] [ns_1@127.0.0.1:<0.6205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:59] [ns_1@127.0.0.1:<0.6127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:51:59] [ns_1@127.0.0.1:<0.6201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:51:59] [ns_1@127.0.0.1:<0.6237.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:51:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751915,657458}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38134632}, 
{processes,10218440}, {processes_used,8592960}, {system,27916192}, {atom,1306681}, {atom_used,1284164}, {binary,639512}, {code,12859877}, {ets,2275784}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2937}, {memory_data,{4040077312,4012797952,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25640 kB\nBuffers: 59252 kB\nCached: 3532096 kB\nSwapCached: 0 kB\nActive: 306292 kB\nInactive: 3444972 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25640 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 96 kB\nWriteback: 0 kB\nAnonPages: 160004 kB\nMapped: 24868 kB\nSlab: 134508 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577604 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616866304}, {buffered_memory,60674048}, {free_memory,26255360}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2932394,0}}, {context_switches,{1040949,0}}, {garbage_collection,{541933,717714412,0}}, {io,{{input,21778896},{output,37259527}}}, {reductions,{226623039,698643}}, {run_queue,0}, {runtime,{42760,200}}]}]}] [stats:error] [2012-03-26 1:52:00] [ns_1@127.0.0.1:<0.6159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6208.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6244.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:00] [ns_1@127.0.0.1:<0.6220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:00] [ns_1@127.0.0.1:<0.6237.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:01] [ns_1@127.0.0.1:<0.6167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:01] 
[ns_1@127.0.0.1:<0.6215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:01] [ns_1@127.0.0.1:<0.6237.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:02] [ns_1@127.0.0.1:<0.6176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:02] [ns_1@127.0.0.1:<0.6230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:02] [ns_1@127.0.0.1:<0.6237.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:03] [ns_1@127.0.0.1:<0.6185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:03] [ns_1@127.0.0.1:<0.6226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:52:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6237.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:52:04] [ns_1@127.0.0.1:<0.6192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:04] [ns_1@127.0.0.1:<0.6250.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:05] [ns_1@127.0.0.1:<0.6196.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6244.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6277.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:05] [ns_1@127.0.0.1:<0.6241.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:06] 
[ns_1@127.0.0.1:<0.6203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:06] [ns_1@127.0.0.1:<0.6263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:07] [ns_1@127.0.0.1:<0.6209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:07] [ns_1@127.0.0.1:<0.6257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:08] [ns_1@127.0.0.1:<0.6217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:08] [ns_1@127.0.0.1:<0.6274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:09] [ns_1@127.0.0.1:<0.6222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:09] [ns_1@127.0.0.1:<0.6307.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:09] [ns_1@127.0.0.1:<0.6270.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6277.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6313.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:10] [ns_1@127.0.0.1:<0.6228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:10] [ns_1@127.0.0.1:<0.6307.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:10] [ns_1@127.0.0.1:<0.6290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:11] [ns_1@127.0.0.1:<0.6232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:11] [ns_1@127.0.0.1:<0.6307.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:11] [ns_1@127.0.0.1:<0.6286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:12] [ns_1@127.0.0.1:<0.6247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:12] [ns_1@127.0.0.1:<0.6307.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:12] [ns_1@127.0.0.1:<0.6301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:13] [ns_1@127.0.0.1:<0.6252.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:52:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:52:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6307.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:52:13] [ns_1@127.0.0.1:<0.6296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:14] [ns_1@127.0.0.1:<0.6259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:14] [ns_1@127.0.0.1:<0.6321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6313.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6347.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:15] [ns_1@127.0.0.1:<0.6265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:15] [ns_1@127.0.0.1:<0.6314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:16] [ns_1@127.0.0.1:<0.6272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:16] [ns_1@127.0.0.1:<0.6334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:17] [ns_1@127.0.0.1:<0.6281.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:17] [ns_1@127.0.0.1:<0.6329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:18] [ns_1@127.0.0.1:<0.6288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:18] [ns_1@127.0.0.1:<0.6348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:19] 
[ns_1@127.0.0.1:<0.6294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:19] [ns_1@127.0.0.1:<0.6375.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:19] [ns_1@127.0.0.1:<0.6341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6347.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6381.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:20] [ns_1@127.0.0.1:<0.6298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:20] [ns_1@127.0.0.1:<0.6375.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:20] [ns_1@127.0.0.1:<0.6360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:21] [ns_1@127.0.0.1:<0.6310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:21] [ns_1@127.0.0.1:<0.6375.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:21] [ns_1@127.0.0.1:<0.6356.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:22] [ns_1@127.0.0.1:<0.6319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:22] [ns_1@127.0.0.1:<0.6375.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:23] [ns_1@127.0.0.1:<0.6371.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:23] [ns_1@127.0.0.1:<0.6327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:52:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6375.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:52:24] [ns_1@127.0.0.1:<0.6367.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:24] [ns_1@127.0.0.1:<0.6332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:25] [ns_1@127.0.0.1:<0.6391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6381.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6416.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:25] [ns_1@127.0.0.1:<0.6339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:26] [ns_1@127.0.0.1:<0.6382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:26] [ns_1@127.0.0.1:<0.6344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:27] [ns_1@127.0.0.1:<0.6403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:27] [ns_1@127.0.0.1:<0.6354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:28] [ns_1@127.0.0.1:<0.6398.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:28] [ns_1@127.0.0.1:<0.6358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:29] [ns_1@127.0.0.1:<0.6417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:29] [ns_1@127.0.0.1:<0.6365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:29] [ns_1@127.0.0.1:<0.6444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:30] [ns_1@127.0.0.1:<0.6411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6416.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6451.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:30] [ns_1@127.0.0.1:<0.6369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:30] [ns_1@127.0.0.1:<0.6444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:31] [ns_1@127.0.0.1:<0.6430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:31] [ns_1@127.0.0.1:<0.6378.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:31] [ns_1@127.0.0.1:<0.6444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:32] [ns_1@127.0.0.1:<0.6425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:32] [ns_1@127.0.0.1:<0.6389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:32] [ns_1@127.0.0.1:<0.6444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:33] [ns_1@127.0.0.1:<0.6440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:33] [ns_1@127.0.0.1:<0.6396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:52:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6444.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:52:34] [ns_1@127.0.0.1:<0.6436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:34] [ns_1@127.0.0.1:<0.6401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:35] [ns_1@127.0.0.1:<0.6459.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= 
Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6451.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:35] [ns_1@127.0.0.1:<0.6409.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:36] [ns_1@127.0.0.1:<0.6452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:36] [ns_1@127.0.0.1:<0.6413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:37] [ns_1@127.0.0.1:<0.6472.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:37] [ns_1@127.0.0.1:<0.6423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:38] [ns_1@127.0.0.1:<0.6466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:38] [ns_1@127.0.0.1:<0.6428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:39] [ns_1@127.0.0.1:<0.6488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:39] [ns_1@127.0.0.1:<0.6434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:39] [ns_1@127.0.0.1:<0.6514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:40] [ns_1@127.0.0.1:<0.6479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6520.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:40] [ns_1@127.0.0.1:<0.6438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:40] [ns_1@127.0.0.1:<0.6514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:41] [ns_1@127.0.0.1:<0.6499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:41] 
[ns_1@127.0.0.1:<0.6448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:41] [ns_1@127.0.0.1:<0.6514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:42] [ns_1@127.0.0.1:<0.6495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:42] [ns_1@127.0.0.1:<0.6457.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:42] [ns_1@127.0.0.1:<0.6514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:43] [ns_1@127.0.0.1:<0.6510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:52:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6514.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:52:43] [ns_1@127.0.0.1:<0.6464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:44] [ns_1@127.0.0.1:<0.6505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:44] [ns_1@127.0.0.1:<0.6470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:45] [ns_1@127.0.0.1:<0.6528.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6520.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6554.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:45] [ns_1@127.0.0.1:<0.6477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:46] 
[ns_1@127.0.0.1:<0.6523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:46] [ns_1@127.0.0.1:<0.6481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:47] [ns_1@127.0.0.1:<0.6544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:47] [ns_1@127.0.0.1:<0.6493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:48] [ns_1@127.0.0.1:<0.6536.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:48] [ns_1@127.0.0.1:<0.6497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:49] [ns_1@127.0.0.1:<0.6578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:52:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6554.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6582.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:52:50] [ns_1@127.0.0.1:<0.6578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:51] [ns_1@127.0.0.1:<0.6578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:52] [ns_1@127.0.0.1:<0.6578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:52:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6578.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:52:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} 
Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6582.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:52:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6597.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:52:58] [ns_1@127.0.0.1:<0.6508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:52:59] [ns_1@127.0.0.1:<0.6559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:52:59] [ns_1@127.0.0.1:<0.6612.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:52:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,751975,685352}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38133736}, {processes,10178728}, {processes_used,8553248}, {system,27955008}, {atom,1306681}, {atom_used,1284164}, {binary,642928}, {code,12859877}, {ets,2304480}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,2997}, {memory_data,{4040077312,4013813760,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25524 kB\nBuffers: 59316 kB\nCached: 3532252 kB\nSwapCached: 0 kB\nActive: 306428 kB\nInactive: 3445032 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25524 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 159880 kB\nMapped: 24868 kB\nSlab: 134408 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578936 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617026048}, {buffered_memory,60739584}, {free_memory,26136576}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{2992421,0}}, {context_switches,{1053713,0}}, {garbage_collection,{548860,728523381,0}}, {io,{{input,21812111},{output,37711523}}}, {reductions,{229153619,609485}}, {run_queue,0}, {runtime,{43420,140}}]}]}] 
[stats:error] [2012-03-26 1:52:59] [ns_1@127.0.0.1:<0.6503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6597.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:00] [ns_1@127.0.0.1:<0.6548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:00] [ns_1@127.0.0.1:<0.6612.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:00] [ns_1@127.0.0.1:<0.6526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:01] [ns_1@127.0.0.1:<0.6570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:01] [ns_1@127.0.0.1:<0.6612.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:01] [ns_1@127.0.0.1:<0.6517.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:02] [ns_1@127.0.0.1:<0.6563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:02] [ns_1@127.0.0.1:<0.6612.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:02] [ns_1@127.0.0.1:<0.6539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:03] [ns_1@127.0.0.1:<0.6616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:53:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6612.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:53:03] 
[ns_1@127.0.0.1:<0.6534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:04] [ns_1@127.0.0.1:<0.6574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:04] [ns_1@127.0.0.1:<0.6551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6652.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:05] [ns_1@127.0.0.1:<0.6632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:05] [ns_1@127.0.0.1:<0.6546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:06] [ns_1@127.0.0.1:<0.6605.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:06] [ns_1@127.0.0.1:<0.6565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:07] [ns_1@127.0.0.1:<0.6645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:08] [ns_1@127.0.0.1:<0.6561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:08] [ns_1@127.0.0.1:<0.6625.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:09] [ns_1@127.0.0.1:<0.6607.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:09] [ns_1@127.0.0.1:<0.6661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:09] [ns_1@127.0.0.1:<0.6684.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:10] [ns_1@127.0.0.1:<0.6572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6652.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6690.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:10] [ns_1@127.0.0.1:<0.6638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:10] [ns_1@127.0.0.1:<0.6684.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:11] [ns_1@127.0.0.1:<0.6627.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:11] [ns_1@127.0.0.1:<0.6671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:11] [ns_1@127.0.0.1:<0.6684.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:12] [ns_1@127.0.0.1:<0.6620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:12] [ns_1@127.0.0.1:<0.6649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:12] [ns_1@127.0.0.1:<0.6684.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:13] [ns_1@127.0.0.1:<0.6640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:13] [ns_1@127.0.0.1:<0.6687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:53:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6684.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:53:14] [ns_1@127.0.0.1:<0.6634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:14] [ns_1@127.0.0.1:<0.6665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:15] [ns_1@127.0.0.1:<0.6653.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6690.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[error_logger:info] [2012-03-26 1:53:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6724.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:15] [ns_1@127.0.0.1:<0.6704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:16] [ns_1@127.0.0.1:<0.6647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:16] [ns_1@127.0.0.1:<0.6676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:17] [ns_1@127.0.0.1:<0.6667.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:17] [ns_1@127.0.0.1:<0.6716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:18] [ns_1@127.0.0.1:<0.6663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:18] [ns_1@127.0.0.1:<0.6696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:19] [ns_1@127.0.0.1:<0.6678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:19] [ns_1@127.0.0.1:<0.6731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:19] [ns_1@127.0.0.1:<0.6752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:20] [ns_1@127.0.0.1:<0.6673.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6724.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6758.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:20] [ns_1@127.0.0.1:<0.6709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:20] [ns_1@127.0.0.1:<0.6752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:21] [ns_1@127.0.0.1:<0.6698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:21] [ns_1@127.0.0.1:<0.6742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:21] [ns_1@127.0.0.1:<0.6752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:22] 
[ns_1@127.0.0.1:<0.6691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:22] [ns_1@127.0.0.1:<0.6721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:22] [ns_1@127.0.0.1:<0.6752.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:23] [ns_1@127.0.0.1:<0.6711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:23] [ns_1@127.0.0.1:<0.6755.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:53:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6752.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:53:24] [ns_1@127.0.0.1:<0.6706.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:24] [ns_1@127.0.0.1:<0.6735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:25] [ns_1@127.0.0.1:<0.6725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6758.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6793.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:25] [ns_1@127.0.0.1:<0.6773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:26] [ns_1@127.0.0.1:<0.6718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:26] [ns_1@127.0.0.1:<0.6746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:27] 
[ns_1@127.0.0.1:<0.6737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:27] [ns_1@127.0.0.1:<0.6786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:28] [ns_1@127.0.0.1:<0.6733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:28] [ns_1@127.0.0.1:<0.6766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:29] [ns_1@127.0.0.1:<0.6748.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:29] [ns_1@127.0.0.1:<0.6819.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:29] [ns_1@127.0.0.1:<0.6800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:30] [ns_1@127.0.0.1:<0.6744.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6793.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6828.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:53:30] [ns_1@127.0.0.1:<0.6819.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:30] [ns_1@127.0.0.1:<0.6778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:31] [ns_1@127.0.0.1:<0.6768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:31] [ns_1@127.0.0.1:<0.6819.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:31] [ns_1@127.0.0.1:<0.6811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:32] [ns_1@127.0.0.1:<0.6759.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:32] [ns_1@127.0.0.1:<0.6819.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:32] [ns_1@127.0.0.1:<0.6790.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:33] [ns_1@127.0.0.1:<0.6780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:53:33] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6819.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:53:33] [ns_1@127.0.0.1:<0.6825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:34] [ns_1@127.0.0.1:<0.6775.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:34] [ns_1@127.0.0.1:<0.6805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6828.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6859.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:35] [ns_1@127.0.0.1:<0.6796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:35] [ns_1@127.0.0.1:<0.6841.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:36] [ns_1@127.0.0.1:<0.6788.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:36] [ns_1@127.0.0.1:<0.6815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:37] [ns_1@127.0.0.1:<0.6809.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:37] [ns_1@127.0.0.1:<0.6854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:38] [ns_1@127.0.0.1:<0.6802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:38] [ns_1@127.0.0.1:<0.6834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:39] [ns_1@127.0.0.1:<0.6822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:39] [ns_1@127.0.0.1:<0.6889.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:39] 
[ns_1@127.0.0.1:<0.6870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6859.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6895.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:40] [ns_1@127.0.0.1:<0.6813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:40] [ns_1@127.0.0.1:<0.6889.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:40] [ns_1@127.0.0.1:<0.6847.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:41] [ns_1@127.0.0.1:<0.6839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:41] [ns_1@127.0.0.1:<0.6889.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:41] [ns_1@127.0.0.1:<0.6880.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:42] [ns_1@127.0.0.1:<0.6832.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:42] [ns_1@127.0.0.1:<0.6889.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:42] [ns_1@127.0.0.1:<0.6860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:43] [ns_1@127.0.0.1:<0.6852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:53:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6889.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:53:44] 
[ns_1@127.0.0.1:<0.6896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:44] [ns_1@127.0.0.1:<0.6845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:45] [ns_1@127.0.0.1:<0.6874.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6895.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6929.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:45] [ns_1@127.0.0.1:<0.6868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:46] [ns_1@127.0.0.1:<0.6911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:46] [ns_1@127.0.0.1:<0.6856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:47] [ns_1@127.0.0.1:<0.6885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:47] [ns_1@127.0.0.1:<0.6878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:48] [ns_1@127.0.0.1:<0.6923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:48] [ns_1@127.0.0.1:<0.6872.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:49] [ns_1@127.0.0.1:<0.6903.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:49] [ns_1@127.0.0.1:<0.6892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:49] [ns_1@127.0.0.1:<0.6957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:50] [ns_1@127.0.0.1:<0.6938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6929.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:53:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6963.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:50] [ns_1@127.0.0.1:<0.6883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:50] [ns_1@127.0.0.1:<0.6957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:51] [ns_1@127.0.0.1:<0.6916.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:51] [ns_1@127.0.0.1:<0.6909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:51] [ns_1@127.0.0.1:<0.6957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:52] [ns_1@127.0.0.1:<0.6949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:52] [ns_1@127.0.0.1:<0.6901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:52] [ns_1@127.0.0.1:<0.6957.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:53] [ns_1@127.0.0.1:<0.6930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:53] [ns_1@127.0.0.1:<0.6921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:53:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.6957.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:53:54] [ns_1@127.0.0.1:<0.6964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:54] [ns_1@127.0.0.1:<0.6914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:55] [ns_1@127.0.0.1:<0.6942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:53:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6963.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[error_logger:info] [2012-03-26 1:53:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.6998.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:53:55] [ns_1@127.0.0.1:<0.6936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:56] [ns_1@127.0.0.1:<0.6980.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:56] [ns_1@127.0.0.1:<0.6926.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:57] [ns_1@127.0.0.1:<0.6953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:57] [ns_1@127.0.0.1:<0.6947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:58] [ns_1@127.0.0.1:<0.6993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:58] [ns_1@127.0.0.1:<0.6940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:59] [ns_1@127.0.0.1:<0.6973.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:59] [ns_1@127.0.0.1:<0.6985.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:59] [ns_1@127.0.0.1:<0.6999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:53:59] [ns_1@127.0.0.1:<0.6960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:53:59] [ns_1@127.0.0.1:<0.7046.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:53:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752035,711461}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38230688}, {processes,10302960}, {processes_used,8677480}, {system,27927728}, {atom,1306681}, {atom_used,1284164}, {binary,639600}, {code,12859877}, {ets,2273784}]}, {system_stats, [{cpu_utilization_rate,25.43640897755611}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3057}, {memory_data,{4040077312,4013948928,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25044 kB\nBuffers: 59396 kB\nCached: 3532404 kB\nSwapCached: 0 kB\nActive: 307028 kB\nInactive: 3445144 kB\nHighTotal: 0 
kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25044 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 68 kB\nWriteback: 0 kB\nAnonPages: 160404 kB\nMapped: 24868 kB\nSlab: 134380 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582172 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617181696}, {buffered_memory,60821504}, {free_memory,25645056}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3052447,0}}, {context_switches,{1065973,0}}, {garbage_collection,{555626,738536420,0}}, {io,{{input,21842682},{output,38120798}}}, {reductions,{231544383,642907}}, {run_queue,0}, {runtime,{44040,150}}]}]}] [stats:error] [2012-03-26 1:54:00] [ns_1@127.0.0.1:<0.7007.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.6998.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7053.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:00] [ns_1@127.0.0.1:<0.6951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:00] [ns_1@127.0.0.1:<0.7046.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:01] [ns_1@127.0.0.1:<0.7012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:01] [ns_1@127.0.0.1:<0.6978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:01] [ns_1@127.0.0.1:<0.7046.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:02] [ns_1@127.0.0.1:<0.7018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:02] [ns_1@127.0.0.1:<0.7046.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:02] [ns_1@127.0.0.1:<0.6971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:03] [ns_1@127.0.0.1:<0.7041.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, 
list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:54:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7046.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:54:03] [ns_1@127.0.0.1:<0.6991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:04] [ns_1@127.0.0.1:<0.7056.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:04] [ns_1@127.0.0.1:<0.6983.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:05] [ns_1@127.0.0.1:<0.7061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7053.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7086.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:05] [ns_1@127.0.0.1:<0.7005.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:06] [ns_1@127.0.0.1:<0.7069.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:06] [ns_1@127.0.0.1:<0.6995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:07] [ns_1@127.0.0.1:<0.7077.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:07] [ns_1@127.0.0.1:<0.7016.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:08] [ns_1@127.0.0.1:<0.7081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:08] [ns_1@127.0.0.1:<0.7010.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:09] [ns_1@127.0.0.1:<0.7093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:09] [ns_1@127.0.0.1:<0.7116.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:09] [ns_1@127.0.0.1:<0.7050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7086.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7122.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:10] [ns_1@127.0.0.1:<0.7097.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:10] [ns_1@127.0.0.1:<0.7116.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:10] [ns_1@127.0.0.1:<0.7020.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:11] [ns_1@127.0.0.1:<0.7103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:11] [ns_1@127.0.0.1:<0.7116.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:11] [ns_1@127.0.0.1:<0.7066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:12] [ns_1@127.0.0.1:<0.7108.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:12] [ns_1@127.0.0.1:<0.7116.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:12] [ns_1@127.0.0.1:<0.7022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:13] [ns_1@127.0.0.1:<0.7119.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:54:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7116.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: 
[stats:error] [2012-03-26 1:54:13] [ns_1@127.0.0.1:<0.7079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:14] [ns_1@127.0.0.1:<0.7128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:14] [ns_1@127.0.0.1:<0.7024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7122.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7156.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:15] [ns_1@127.0.0.1:<0.7136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:15] [ns_1@127.0.0.1:<0.7095.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:16] [ns_1@127.0.0.1:<0.7141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:16] [ns_1@127.0.0.1:<0.7059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:17] [ns_1@127.0.0.1:<0.7148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:18] [ns_1@127.0.0.1:<0.7105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:18] [ns_1@127.0.0.1:<0.7153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:19] [ns_1@127.0.0.1:<0.7072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:19] [ns_1@127.0.0.1:<0.7163.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:19] [ns_1@127.0.0.1:<0.7184.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:20] [ns_1@127.0.0.1:<0.7123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7156.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.7190.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:20] [ns_1@127.0.0.1:<0.7167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:20] [ns_1@127.0.0.1:<0.7184.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:21] [ns_1@127.0.0.1:<0.7083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:21] [ns_1@127.0.0.1:<0.7174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:21] [ns_1@127.0.0.1:<0.7184.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:22] [ns_1@127.0.0.1:<0.7138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:22] [ns_1@127.0.0.1:<0.7178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:22] [ns_1@127.0.0.1:<0.7184.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:23] [ns_1@127.0.0.1:<0.7099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:23] [ns_1@127.0.0.1:<0.7187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:54:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7184.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:54:24] [ns_1@127.0.0.1:<0.7150.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:24] [ns_1@127.0.0.1:<0.7198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:25] [ns_1@127.0.0.1:<0.7110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7190.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7225.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:25] [ns_1@127.0.0.1:<0.7205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:26] [ns_1@127.0.0.1:<0.7165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:26] [ns_1@127.0.0.1:<0.7210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:27] [ns_1@127.0.0.1:<0.7130.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:27] [ns_1@127.0.0.1:<0.7218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:28] [ns_1@127.0.0.1:<0.7176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:28] [ns_1@127.0.0.1:<0.7222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:29] [ns_1@127.0.0.1:<0.7143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:29] [ns_1@127.0.0.1:<0.7232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:29] [ns_1@127.0.0.1:<0.7253.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:30] [ns_1@127.0.0.1:<0.7191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7225.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:54:30] [ns_1@127.0.0.1:<0.7253.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 1:54:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7260.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:31] [ns_1@127.0.0.1:<0.7249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:31] [ns_1@127.0.0.1:<0.7253.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:32] [ns_1@127.0.0.1:<0.7257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:32] [ns_1@127.0.0.1:<0.7237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 1:54:32] [ns_1@127.0.0.1:<0.7253.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:33] [ns_1@127.0.0.1:<0.7157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:33] [ns_1@127.0.0.1:<0.7243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:54:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7253.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:54:34] [ns_1@127.0.0.1:<0.7207.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:34] [ns_1@127.0.0.1:<0.7247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:35] [ns_1@127.0.0.1:<0.7169.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7260.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7289.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:35] [ns_1@127.0.0.1:<0.7269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:36] [ns_1@127.0.0.1:<0.7220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:36] [ns_1@127.0.0.1:<0.7275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:37] [ns_1@127.0.0.1:<0.7180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:37] [ns_1@127.0.0.1:<0.7282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:38] 
[ns_1@127.0.0.1:<0.7234.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:38] [ns_1@127.0.0.1:<0.7286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:39] [ns_1@127.0.0.1:<0.7200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:39] [ns_1@127.0.0.1:<0.7317.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:39] [ns_1@127.0.0.1:<0.7298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:40] [ns_1@127.0.0.1:<0.7245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7289.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7325.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:54:40] [ns_1@127.0.0.1:<0.7317.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:40] [ns_1@127.0.0.1:<0.7302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:41] [ns_1@127.0.0.1:<0.7212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:41] [ns_1@127.0.0.1:<0.7317.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:41] [ns_1@127.0.0.1:<0.7308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:42] [ns_1@127.0.0.1:<0.7271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:42] [ns_1@127.0.0.1:<0.7317.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:42] [ns_1@127.0.0.1:<0.7313.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:43] [ns_1@127.0.0.1:<0.7226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:54:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7317.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:54:43] [ns_1@127.0.0.1:<0.7322.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:44] [ns_1@127.0.0.1:<0.7284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:44] [ns_1@127.0.0.1:<0.7331.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7325.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7357.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:45] [ns_1@127.0.0.1:<0.7239.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:45] [ns_1@127.0.0.1:<0.7339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:46] [ns_1@127.0.0.1:<0.7300.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:46] [ns_1@127.0.0.1:<0.7344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:47] [ns_1@127.0.0.1:<0.7264.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:47] [ns_1@127.0.0.1:<0.7351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:48] [ns_1@127.0.0.1:<0.7310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:48] [ns_1@127.0.0.1:<0.7358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:49] [ns_1@127.0.0.1:<0.7277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:49] [ns_1@127.0.0.1:<0.7385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:49] [ns_1@127.0.0.1:<0.7366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7357.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7391.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:50] [ns_1@127.0.0.1:<0.7329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:50] [ns_1@127.0.0.1:<0.7385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:50] [ns_1@127.0.0.1:<0.7370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:51] [ns_1@127.0.0.1:<0.7290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:51] [ns_1@127.0.0.1:<0.7385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:51] [ns_1@127.0.0.1:<0.7377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:52] [ns_1@127.0.0.1:<0.7342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:52] [ns_1@127.0.0.1:<0.7385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:52] [ns_1@127.0.0.1:<0.7381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:53] [ns_1@127.0.0.1:<0.7304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:54:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7385.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:54:53] [ns_1@127.0.0.1:<0.7392.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:54] [ns_1@127.0.0.1:<0.7354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:55] 
[ns_1@127.0.0.1:<0.7401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:54:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7391.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:54:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7426.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:54:55] [ns_1@127.0.0.1:<0.7320.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:56] [ns_1@127.0.0.1:<0.7408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:56] [ns_1@127.0.0.1:<0.7368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:57] [ns_1@127.0.0.1:<0.7413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:57] [ns_1@127.0.0.1:<0.7336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:58] [ns_1@127.0.0.1:<0.7421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:58] [ns_1@127.0.0.1:<0.7379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:59] [ns_1@127.0.0.1:<0.7427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:59] [ns_1@127.0.0.1:<0.7349.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:59] [ns_1@127.0.0.1:<0.7364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:54:59] [ns_1@127.0.0.1:<0.7375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:54:59] [ns_1@127.0.0.1:<0.7459.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:54:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752095,738642}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38098720}, {processes,10126672}, {processes_used,8501192}, {system,27972048}, {atom,1306681}, {atom_used,1284164}, {binary,645528}, {code,12859877}, {ets,2305000}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, 
{couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3117}, {memory_data,{4040077312,4014686208,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25284 kB\nBuffers: 59532 kB\nCached: 3532544 kB\nSwapCached: 0 kB\nActive: 306584 kB\nInactive: 3445396 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25284 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 159912 kB\nMapped: 24868 kB\nSlab: 134400 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578348 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617325056}, {buffered_memory,60960768}, {free_memory,25890816}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3112475,0}}, {context_switches,{1079285,0}}, {garbage_collection,{563054,749804589,0}}, {io,{{input,22119535},{output,38784827}}}, {reductions,{234198002,640989}}, {run_queue,0}, {runtime,{44680,140}}]}]}] [stats:error] [2012-03-26 1:55:00] [ns_1@127.0.0.1:<0.7435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7426.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7466.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:00] [ns_1@127.0.0.1:<0.7399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:00] [ns_1@127.0.0.1:<0.7459.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:01] [ns_1@127.0.0.1:<0.7440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:01] [ns_1@127.0.0.1:<0.7388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:01] [ns_1@127.0.0.1:<0.7459.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:02] [ns_1@127.0.0.1:<0.7446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:02] 
[ns_1@127.0.0.1:<0.7411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:02] [ns_1@127.0.0.1:<0.7459.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:03] [ns_1@127.0.0.1:<0.7450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:03] [ns_1@127.0.0.1:<0.7406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:55:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7459.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:55:04] [ns_1@127.0.0.1:<0.7467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:04] [ns_1@127.0.0.1:<0.7423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:05] [ns_1@127.0.0.1:<0.7452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7466.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7499.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:05] [ns_1@127.0.0.1:<0.7419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:06] [ns_1@127.0.0.1:<0.7481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:06] [ns_1@127.0.0.1:<0.7438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:07] [ns_1@127.0.0.1:<0.7454.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:07] 
[ns_1@127.0.0.1:<0.7433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:08] [ns_1@127.0.0.1:<0.7494.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:08] [ns_1@127.0.0.1:<0.7448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:09] [ns_1@127.0.0.1:<0.7474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:09] [ns_1@127.0.0.1:<0.7444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:09] [ns_1@127.0.0.1:<0.7531.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:10] [ns_1@127.0.0.1:<0.7510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7499.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7537.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:10] [ns_1@127.0.0.1:<0.7472.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:10] [ns_1@127.0.0.1:<0.7531.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:11] [ns_1@127.0.0.1:<0.7487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:11] [ns_1@127.0.0.1:<0.7463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:11] [ns_1@127.0.0.1:<0.7531.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:12] [ns_1@127.0.0.1:<0.7520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:12] [ns_1@127.0.0.1:<0.7485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:12] [ns_1@127.0.0.1:<0.7531.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:13] [ns_1@127.0.0.1:<0.7500.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:13] [ns_1@127.0.0.1:<0.7479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:55:13] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7531.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:55:14] [ns_1@127.0.0.1:<0.7538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:14] [ns_1@127.0.0.1:<0.7496.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:15] [ns_1@127.0.0.1:<0.7514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7537.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7571.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:15] [ns_1@127.0.0.1:<0.7492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:16] [ns_1@127.0.0.1:<0.7553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:16] [ns_1@127.0.0.1:<0.7512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:17] [ns_1@127.0.0.1:<0.7525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:17] [ns_1@127.0.0.1:<0.7508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:18] [ns_1@127.0.0.1:<0.7565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:18] [ns_1@127.0.0.1:<0.7523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:19] [ns_1@127.0.0.1:<0.7545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:19] [ns_1@127.0.0.1:<0.7597.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:19] [ns_1@127.0.0.1:<0.7518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:20] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7571.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7603.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:20] [ns_1@127.0.0.1:<0.7580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:20] [ns_1@127.0.0.1:<0.7597.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:20] [ns_1@127.0.0.1:<0.7543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:21] [ns_1@127.0.0.1:<0.7560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:21] [ns_1@127.0.0.1:<0.7597.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:21] [ns_1@127.0.0.1:<0.7534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:22] [ns_1@127.0.0.1:<0.7591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:22] [ns_1@127.0.0.1:<0.7597.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:22] [ns_1@127.0.0.1:<0.7556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:23] [ns_1@127.0.0.1:<0.7576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:55:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7597.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:55:23] [ns_1@127.0.0.1:<0.7551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:24] 
[ns_1@127.0.0.1:<0.7611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:24] [ns_1@127.0.0.1:<0.7568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7603.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7638.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:25] [ns_1@127.0.0.1:<0.7587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:25] [ns_1@127.0.0.1:<0.7563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:26] [ns_1@127.0.0.1:<0.7623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:26] [ns_1@127.0.0.1:<0.7582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:27] [ns_1@127.0.0.1:<0.7600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:27] [ns_1@127.0.0.1:<0.7578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:28] [ns_1@127.0.0.1:<0.7635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:28] [ns_1@127.0.0.1:<0.7593.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:29] [ns_1@127.0.0.1:<0.7618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:29] [ns_1@127.0.0.1:<0.7666.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:30] [ns_1@127.0.0.1:<0.7589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7638.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7673.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:30] 
[ns_1@127.0.0.1:<0.7650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:30] [ns_1@127.0.0.1:<0.7666.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:31] [ns_1@127.0.0.1:<0.7613.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:31] [ns_1@127.0.0.1:<0.7631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:31] [ns_1@127.0.0.1:<0.7666.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:32] [ns_1@127.0.0.1:<0.7604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:32] [ns_1@127.0.0.1:<0.7660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:32] [ns_1@127.0.0.1:<0.7666.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:33] [ns_1@127.0.0.1:<0.7625.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:33] [ns_1@127.0.0.1:<0.7645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:55:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7666.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:55:34] [ns_1@127.0.0.1:<0.7620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:34] [ns_1@127.0.0.1:<0.7679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:35] [ns_1@127.0.0.1:<0.7639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7673.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7706.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:35] [ns_1@127.0.0.1:<0.7656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:36] [ns_1@127.0.0.1:<0.7633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:36] [ns_1@127.0.0.1:<0.7692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:37] [ns_1@127.0.0.1:<0.7652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:37] [ns_1@127.0.0.1:<0.7670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:38] [ns_1@127.0.0.1:<0.7647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:38] [ns_1@127.0.0.1:<0.7703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:39] [ns_1@127.0.0.1:<0.7662.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:39] [ns_1@127.0.0.1:<0.7686.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:39] [ns_1@127.0.0.1:<0.7736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:40] [ns_1@127.0.0.1:<0.7658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7706.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7742.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:40] [ns_1@127.0.0.1:<0.7719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:40] [ns_1@127.0.0.1:<0.7736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:41] [ns_1@127.0.0.1:<0.7681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:41] [ns_1@127.0.0.1:<0.7699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:41] [ns_1@127.0.0.1:<0.7736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:42] [ns_1@127.0.0.1:<0.7674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:42] 
[ns_1@127.0.0.1:<0.7730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:42] [ns_1@127.0.0.1:<0.7736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:43] [ns_1@127.0.0.1:<0.7694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:43] [ns_1@127.0.0.1:<0.7715.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:55:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7736.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:55:44] [ns_1@127.0.0.1:<0.7688.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:44] [ns_1@127.0.0.1:<0.7748.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:45] [ns_1@127.0.0.1:<0.7707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7742.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7776.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:45] [ns_1@127.0.0.1:<0.7725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:46] [ns_1@127.0.0.1:<0.7701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:46] [ns_1@127.0.0.1:<0.7761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:47] [ns_1@127.0.0.1:<0.7721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:47] 
[ns_1@127.0.0.1:<0.7739.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:48] [ns_1@127.0.0.1:<0.7717.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:48] [ns_1@127.0.0.1:<0.7773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:49] [ns_1@127.0.0.1:<0.7732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:49] [ns_1@127.0.0.1:<0.7802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:49] [ns_1@127.0.0.1:<0.7756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:50] [ns_1@127.0.0.1:<0.7727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7776.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7810.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:55:50] [ns_1@127.0.0.1:<0.7802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:50] [ns_1@127.0.0.1:<0.7787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:51] [ns_1@127.0.0.1:<0.7750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:51] [ns_1@127.0.0.1:<0.7802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:51] [ns_1@127.0.0.1:<0.7768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:52] [ns_1@127.0.0.1:<0.7743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:52] [ns_1@127.0.0.1:<0.7802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:52] [ns_1@127.0.0.1:<0.7798.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:53] [ns_1@127.0.0.1:<0.7763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:55:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial 
call: ns_janitor:cleanup/2 pid: <0.7802.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:55:53] [ns_1@127.0.0.1:<0.7783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:54] [ns_1@127.0.0.1:<0.7758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:54] [ns_1@127.0.0.1:<0.7818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:55:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7810.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:55:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7843.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:55:55] [ns_1@127.0.0.1:<0.7777.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:55] [ns_1@127.0.0.1:<0.7794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:56] [ns_1@127.0.0.1:<0.7770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:56] [ns_1@127.0.0.1:<0.7830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:57] [ns_1@127.0.0.1:<0.7789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:57] [ns_1@127.0.0.1:<0.7807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:58] [ns_1@127.0.0.1:<0.7785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:58] [ns_1@127.0.0.1:<0.7844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:59] [ns_1@127.0.0.1:<0.7805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:55:59] [ns_1@127.0.0.1:<0.7872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:55:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752155,767332}}, 
{outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38344424}, {processes,10392200}, {processes_used,8766720}, {system,27952224}, {atom,1306681}, {atom_used,1284164}, {binary,645752}, {code,12859877}, {ets,2277872}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3177}, {memory_data,{4040077312,4014194688,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25416 kB\nBuffers: 59604 kB\nCached: 3532704 kB\nSwapCached: 0 kB\nActive: 306696 kB\nInactive: 3445544 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25416 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 159920 kB\nMapped: 24868 kB\nSlab: 134396 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578348 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617488896}, {buffered_memory,61034496}, {free_memory,26025984}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3172503,0}}, {context_switches,{1092307,0}}, {garbage_collection,{570082,760953837,0}}, {io,{{input,22150142},{output,39227963}}}, {reductions,{236769718,635081}}, {run_queue,0}, {runtime,{45320,160}}]}]}] [stats:error] [2012-03-26 1:55:59] [ns_1@127.0.0.1:<0.7825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:59] [ns_1@127.0.0.1:<0.7838.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:55:59] [ns_1@127.0.0.1:<0.7852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7843.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7883.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 1:56:00] [ns_1@127.0.0.1:<0.7796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:00] [ns_1@127.0.0.1:<0.7872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:00] [ns_1@127.0.0.1:<0.7857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:01] [ns_1@127.0.0.1:<0.7823.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:01] [ns_1@127.0.0.1:<0.7872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:01] [ns_1@127.0.0.1:<0.7863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:02] [ns_1@127.0.0.1:<0.7815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:02] [ns_1@127.0.0.1:<0.7872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:03] [ns_1@127.0.0.1:<0.7867.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:03] [ns_1@127.0.0.1:<0.7836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:56:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7872.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:56:04] [ns_1@127.0.0.1:<0.7884.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:04] [ns_1@127.0.0.1:<0.7828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:05] [ns_1@127.0.0.1:<0.7891.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7883.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:05] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7916.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:05] [ns_1@127.0.0.1:<0.7850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:06] [ns_1@127.0.0.1:<0.7898.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:06] [ns_1@127.0.0.1:<0.7840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:07] [ns_1@127.0.0.1:<0.7904.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:07] [ns_1@127.0.0.1:<0.7861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:08] [ns_1@127.0.0.1:<0.7911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:08] [ns_1@127.0.0.1:<0.7855.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:09] [ns_1@127.0.0.1:<0.7917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:09] [ns_1@127.0.0.1:<0.7876.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:09] [ns_1@127.0.0.1:<0.7948.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:10] [ns_1@127.0.0.1:<0.7927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7916.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7954.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:10] [ns_1@127.0.0.1:<0.7865.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:10] [ns_1@127.0.0.1:<0.7948.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:11] [ns_1@127.0.0.1:<0.7931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:11] [ns_1@127.0.0.1:<0.7878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:11] [ns_1@127.0.0.1:<0.7948.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:12] 
[ns_1@127.0.0.1:<0.7937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:12] [ns_1@127.0.0.1:<0.7889.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:12] [ns_1@127.0.0.1:<0.7948.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:13] [ns_1@127.0.0.1:<0.7942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:13] [ns_1@127.0.0.1:<0.7880.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:56:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.7948.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:56:14] [ns_1@127.0.0.1:<0.7955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:14] [ns_1@127.0.0.1:<0.7902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:15] [ns_1@127.0.0.1:<0.7962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7954.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.7988.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:15] [ns_1@127.0.0.1:<0.7896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:16] [ns_1@127.0.0.1:<0.7970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:16] [ns_1@127.0.0.1:<0.7913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:17] 
[ns_1@127.0.0.1:<0.7975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:17] [ns_1@127.0.0.1:<0.7909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:18] [ns_1@127.0.0.1:<0.7982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:18] [ns_1@127.0.0.1:<0.7929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:19] [ns_1@127.0.0.1:<0.7989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:19] [ns_1@127.0.0.1:<0.7925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:19] [ns_1@127.0.0.1:<0.8016.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:20] [ns_1@127.0.0.1:<0.7997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.7988.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8022.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:20] [ns_1@127.0.0.1:<0.7940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:20] [ns_1@127.0.0.1:<0.8016.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:21] [ns_1@127.0.0.1:<0.8001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:21] [ns_1@127.0.0.1:<0.7935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:21] [ns_1@127.0.0.1:<0.8016.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:22] [ns_1@127.0.0.1:<0.8008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:22] [ns_1@127.0.0.1:<0.8016.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:22] [ns_1@127.0.0.1:<0.7960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:23] [ns_1@127.0.0.1:<0.8012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:56:23] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8016.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:56:23] [ns_1@127.0.0.1:<0.7951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:24] [ns_1@127.0.0.1:<0.8023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:24] [ns_1@127.0.0.1:<0.7973.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:25] [ns_1@127.0.0.1:<0.8032.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8022.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8057.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:25] [ns_1@127.0.0.1:<0.7968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:26] [ns_1@127.0.0.1:<0.8040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:26] [ns_1@127.0.0.1:<0.7985.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:27] [ns_1@127.0.0.1:<0.8047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:27] [ns_1@127.0.0.1:<0.7980.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:28] [ns_1@127.0.0.1:<0.8052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:28] [ns_1@127.0.0.1:<0.7999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:29] [ns_1@127.0.0.1:<0.8062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:29] [ns_1@127.0.0.1:<0.8083.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:29] 
[ns_1@127.0.0.1:<0.7995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8057.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8090.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:30] [ns_1@127.0.0.1:<0.8067.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:30] [ns_1@127.0.0.1:<0.8083.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:30] [ns_1@127.0.0.1:<0.8010.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:31] [ns_1@127.0.0.1:<0.8073.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:31] [ns_1@127.0.0.1:<0.8083.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:31] [ns_1@127.0.0.1:<0.8006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:32] [ns_1@127.0.0.1:<0.8077.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:32] [ns_1@127.0.0.1:<0.8083.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:32] [ns_1@127.0.0.1:<0.8030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:33] [ns_1@127.0.0.1:<0.8087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:56:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8083.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:56:33] 
[ns_1@127.0.0.1:<0.8019.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:34] [ns_1@127.0.0.1:<0.8096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:34] [ns_1@127.0.0.1:<0.8042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8090.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8123.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:35] [ns_1@127.0.0.1:<0.8103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:35] [ns_1@127.0.0.1:<0.8037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:36] [ns_1@127.0.0.1:<0.8109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:36] [ns_1@127.0.0.1:<0.8054.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:37] [ns_1@127.0.0.1:<0.8116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:38] [ns_1@127.0.0.1:<0.8050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:38] [ns_1@127.0.0.1:<0.8120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:39] [ns_1@127.0.0.1:<0.8069.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:39] [ns_1@127.0.0.1:<0.8132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:39] [ns_1@127.0.0.1:<0.8153.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:40] [ns_1@127.0.0.1:<0.8064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8123.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8159.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:40] [ns_1@127.0.0.1:<0.8136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:40] [ns_1@127.0.0.1:<0.8153.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:41] [ns_1@127.0.0.1:<0.8079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:41] [ns_1@127.0.0.1:<0.8142.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:41] [ns_1@127.0.0.1:<0.8153.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:42] [ns_1@127.0.0.1:<0.8075.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:42] [ns_1@127.0.0.1:<0.8147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:42] [ns_1@127.0.0.1:<0.8153.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:43] [ns_1@127.0.0.1:<0.8098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:43] [ns_1@127.0.0.1:<0.8156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:56:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8153.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:56:44] [ns_1@127.0.0.1:<0.8091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:44] [ns_1@127.0.0.1:<0.8165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:45] [ns_1@127.0.0.1:<0.8111.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8159.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[error_logger:info] [2012-03-26 1:56:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8193.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:45] [ns_1@127.0.0.1:<0.8173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:46] [ns_1@127.0.0.1:<0.8105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:46] [ns_1@127.0.0.1:<0.8178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:47] [ns_1@127.0.0.1:<0.8124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:47] [ns_1@127.0.0.1:<0.8185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:48] [ns_1@127.0.0.1:<0.8118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:48] [ns_1@127.0.0.1:<0.8190.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:49] [ns_1@127.0.0.1:<0.8138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:49] [ns_1@127.0.0.1:<0.8200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:49] [ns_1@127.0.0.1:<0.8221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:50] [ns_1@127.0.0.1:<0.8134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8193.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:50] [ns_1@127.0.0.1:<0.8204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:50] [ns_1@127.0.0.1:<0.8221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:51] [ns_1@127.0.0.1:<0.8149.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:51] [ns_1@127.0.0.1:<0.8211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:51] [ns_1@127.0.0.1:<0.8221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:52] 
[ns_1@127.0.0.1:<0.8144.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:52] [ns_1@127.0.0.1:<0.8215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:52] [ns_1@127.0.0.1:<0.8221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:53] [ns_1@127.0.0.1:<0.8167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:53] [ns_1@127.0.0.1:<0.8224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:56:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:56:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8221.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:56:54] [ns_1@127.0.0.1:<0.8160.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:54] [ns_1@127.0.0.1:<0.8235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:55] [ns_1@127.0.0.1:<0.8180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:56:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:56:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:56:55] [ns_1@127.0.0.1:<0.8242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:56] [ns_1@127.0.0.1:<0.8175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:56] [ns_1@127.0.0.1:<0.8247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:56:57] 
[ns_1@127.0.0.1:<0.8194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 1:56:57] [ns_1@127.0.0.1:<0.8255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 1:56:58] [ns_1@127.0.0.1:<0.8187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 1:56:58] [ns_1@127.0.0.1:<0.8259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 1:56:59] [ns_1@127.0.0.1:<0.8206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 1:56:59] [ns_1@127.0.0.1:<0.8302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1']
[ns_doctor:info] [2012-03-26 1:56:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses:
[{'ns_1@127.0.0.1',
    [{last_heard,{1332,752215,797248}},
    {outgoing_replications_safeness_level,[{"default",unknown}]},
    {incoming_replications_conf_hashes,[{"default",[]}]},
    {replication,[{"default",1.0}]},
    {active_buckets,["Default"]},
    {ready_buckets,[]},
    {local_tasks,[]},
    {memory, [{total,38191664}, {processes,10205192}, {processes_used,8579712}, {system,27986472}, {atom,1306681}, {atom_used,1284164}, {binary,646168}, {code,12859877}, {ets,2307416}]},
    {system_stats, [{cpu_utilization_rate,25.252525252525253}, {swap_total,6140452864}, {swap_used,102400}]},
    {interesting_stats,[]},
    {cluster_compatibility_version,1},
    {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]},
    {system_arch,"x86_64-unknown-linux-gnu"},
    {wall_clock,3237},
    {memory_data,{4040077312,4014051328,{<0.300.0>,601176}}},
    {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]},
    {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25044 kB\nBuffers: 59664 kB\nCached: 3532860 kB\nSwapCached: 0 kB\nActive: 307344 kB\nInactive: 3445624 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25044 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 160436 kB\nMapped: 24868 kB\nSlab: 134360 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578348 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>},
    {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3617648640}, {buffered_memory,61095936}, {free_memory,25645056}, {total_memory,4040077312}]},
    {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]},
    {statistics, [{wall_clock,{3232533,0}}, {context_switches,{1105394,0}}, {garbage_collection,{577219,772243645,0}}, {io,{{input,22180828},{output,39669812}}}, {reductions,{239373650,644623}}, {run_queue,0}, {runtime,{45960,150}}]}]}]
[stats:error] [2012-03-26 1:56:59] [ns_1@127.0.0.1:<0.8269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 1:57:00]
[ns_1@127.0.0.1:<0.8202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:00] [ns_1@127.0.0.1:<0.8213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:00] [ns_1@127.0.0.1:<0.8228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:00] [ns_1@127.0.0.1:<0.8244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:00] [ns_1@127.0.0.1:<0.8257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8319.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:57:00] [ns_1@127.0.0.1:<0.8302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:00] [ns_1@127.0.0.1:<0.8274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:01] [ns_1@127.0.0.1:<0.8217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:01] [ns_1@127.0.0.1:<0.8302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:01] [ns_1@127.0.0.1:<0.8280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:02] [ns_1@127.0.0.1:<0.8271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:02] [ns_1@127.0.0.1:<0.8302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:02] [ns_1@127.0.0.1:<0.8284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:03] [ns_1@127.0.0.1:<0.8237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:57:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8302.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 
in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:57:03] [ns_1@127.0.0.1:<0.8308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:04] [ns_1@127.0.0.1:<0.8282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:04] [ns_1@127.0.0.1:<0.8325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8319.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8350.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:05] [ns_1@127.0.0.1:<0.8249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:05] [ns_1@127.0.0.1:<0.8310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:06] [ns_1@127.0.0.1:<0.8323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:06] [ns_1@127.0.0.1:<0.8338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:07] [ns_1@127.0.0.1:<0.8263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:07] [ns_1@127.0.0.1:<0.8312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:08] [ns_1@127.0.0.1:<0.8336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:08] [ns_1@127.0.0.1:<0.8351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:09] [ns_1@127.0.0.1:<0.8278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:09] [ns_1@127.0.0.1:<0.8382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:10] [ns_1@127.0.0.1:<0.8314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: 
[{pid,<0.8350.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8388.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:10] [ns_1@127.0.0.1:<0.8347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:10] [ns_1@127.0.0.1:<0.8382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:11] [ns_1@127.0.0.1:<0.8365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:11] [ns_1@127.0.0.1:<0.8306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:11] [ns_1@127.0.0.1:<0.8382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:12] [ns_1@127.0.0.1:<0.8316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:12] [ns_1@127.0.0.1:<0.8363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:12] [ns_1@127.0.0.1:<0.8382.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:13] [ns_1@127.0.0.1:<0.8376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:13] [ns_1@127.0.0.1:<0.8330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:57:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8382.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:57:14] [ns_1@127.0.0.1:<0.8332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:14] [ns_1@127.0.0.1:<0.8374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:15] [ns_1@127.0.0.1:<0.8396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:15] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8388.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8422.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:15] [ns_1@127.0.0.1:<0.8343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:16] [ns_1@127.0.0.1:<0.8345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:16] [ns_1@127.0.0.1:<0.8394.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:17] [ns_1@127.0.0.1:<0.8409.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:17] [ns_1@127.0.0.1:<0.8359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:18] [ns_1@127.0.0.1:<0.8361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:18] [ns_1@127.0.0.1:<0.8407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:19] [ns_1@127.0.0.1:<0.8423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:19] [ns_1@127.0.0.1:<0.8369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:19] [ns_1@127.0.0.1:<0.8450.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:20] [ns_1@127.0.0.1:<0.8371.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8422.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8456.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:20] [ns_1@127.0.0.1:<0.8419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:20] [ns_1@127.0.0.1:<0.8450.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:21] 
[ns_1@127.0.0.1:<0.8435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:21] [ns_1@127.0.0.1:<0.8385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:21] [ns_1@127.0.0.1:<0.8450.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:22] [ns_1@127.0.0.1:<0.8389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:22] [ns_1@127.0.0.1:<0.8433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:22] [ns_1@127.0.0.1:<0.8450.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:23] [ns_1@127.0.0.1:<0.8446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:23] [ns_1@127.0.0.1:<0.8402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:57:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8450.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:57:24] [ns_1@127.0.0.1:<0.8404.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:24] [ns_1@127.0.0.1:<0.8444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:25] [ns_1@127.0.0.1:<0.8466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8456.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8491.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:25] 
[ns_1@127.0.0.1:<0.8414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:26] [ns_1@127.0.0.1:<0.8416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:26] [ns_1@127.0.0.1:<0.8464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:27] [ns_1@127.0.0.1:<0.8478.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:27] [ns_1@127.0.0.1:<0.8429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:28] [ns_1@127.0.0.1:<0.8431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:28] [ns_1@127.0.0.1:<0.8476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:29] [ns_1@127.0.0.1:<0.8492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:29] [ns_1@127.0.0.1:<0.8517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:29] [ns_1@127.0.0.1:<0.8440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:30] [ns_1@127.0.0.1:<0.8442.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8491.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8526.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:57:30] [ns_1@127.0.0.1:<0.8517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:30] [ns_1@127.0.0.1:<0.8488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:31] [ns_1@127.0.0.1:<0.8505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:31] [ns_1@127.0.0.1:<0.8517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:31] [ns_1@127.0.0.1:<0.8453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:32] [ns_1@127.0.0.1:<0.8457.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:32] [ns_1@127.0.0.1:<0.8517.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:32] [ns_1@127.0.0.1:<0.8503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:57:33] [ns_1@127.0.0.1:<0.8520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:57:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8517.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:57:33] [ns_1@127.0.0.1:<0.8471.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:34] [ns_1@127.0.0.1:<0.8473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:34] [ns_1@127.0.0.1:<0.8513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:35] [ns_1@127.0.0.1:<0.8537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8526.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8559.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:35] [ns_1@127.0.0.1:<0.8484.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:36] [ns_1@127.0.0.1:<0.8486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:36] [ns_1@127.0.0.1:<0.8532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:37] [ns_1@127.0.0.1:<0.8550.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:37] [ns_1@127.0.0.1:<0.8498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:38] [ns_1@127.0.0.1:<0.8500.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:38] 
[ns_1@127.0.0.1:<0.8545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:39] [ns_1@127.0.0.1:<0.8566.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:39] [ns_1@127.0.0.1:<0.8587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:39] [ns_1@127.0.0.1:<0.8509.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8559.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8593.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:40] [ns_1@127.0.0.1:<0.8511.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:40] [ns_1@127.0.0.1:<0.8587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:40] [ns_1@127.0.0.1:<0.8556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:41] [ns_1@127.0.0.1:<0.8576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:41] [ns_1@127.0.0.1:<0.8587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:41] [ns_1@127.0.0.1:<0.8523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:42] [ns_1@127.0.0.1:<0.8528.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:42] [ns_1@127.0.0.1:<0.8587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:42] [ns_1@127.0.0.1:<0.8572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:43] [ns_1@127.0.0.1:<0.8590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:57:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8587.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 
in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:57:43] [ns_1@127.0.0.1:<0.8539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:44] [ns_1@127.0.0.1:<0.8543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:44] [ns_1@127.0.0.1:<0.8583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8593.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8627.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:45] [ns_1@127.0.0.1:<0.8607.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:46] [ns_1@127.0.0.1:<0.8552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:46] [ns_1@127.0.0.1:<0.8554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:47] [ns_1@127.0.0.1:<0.8601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:47] [ns_1@127.0.0.1:<0.8619.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:48] [ns_1@127.0.0.1:<0.8568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:48] [ns_1@127.0.0.1:<0.8570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:49] [ns_1@127.0.0.1:<0.8614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:49] [ns_1@127.0.0.1:<0.8634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:49] [ns_1@127.0.0.1:<0.8655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:50] [ns_1@127.0.0.1:<0.8578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: 
[{pid,<0.8627.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8661.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:50] [ns_1@127.0.0.1:<0.8581.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:50] [ns_1@127.0.0.1:<0.8655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:51] [ns_1@127.0.0.1:<0.8628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:51] [ns_1@127.0.0.1:<0.8645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:51] [ns_1@127.0.0.1:<0.8655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:52] [ns_1@127.0.0.1:<0.8594.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:52] [ns_1@127.0.0.1:<0.8599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:52] [ns_1@127.0.0.1:<0.8655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:53] [ns_1@127.0.0.1:<0.8640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:53] [ns_1@127.0.0.1:<0.8658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:57:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8655.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:57:54] [ns_1@127.0.0.1:<0.8609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:54] [ns_1@127.0.0.1:<0.8612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:55] [ns_1@127.0.0.1:<0.8651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:57:55] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8661.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:57:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8696.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:57:55] [ns_1@127.0.0.1:<0.8676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:56] [ns_1@127.0.0.1:<0.8621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:56] [ns_1@127.0.0.1:<0.8624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:57] [ns_1@127.0.0.1:<0.8671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:57] [ns_1@127.0.0.1:<0.8689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:58] [ns_1@127.0.0.1:<0.8636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:58] [ns_1@127.0.0.1:<0.8638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:59] [ns_1@127.0.0.1:<0.8683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:57:59] [ns_1@127.0.0.1:<0.8703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:57:59] [ns_1@127.0.0.1:<0.8725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:57:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752275,822297}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38160176}, {processes,10133416}, {processes_used,8507936}, {system,28026760}, {atom,1306681}, {atom_used,1284164}, {binary,648456}, {code,12859877}, {ets,2338992}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3297}, {memory_data,{4040077312,4014178304,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 
3945388 kB\nMemFree: 27020 kB\nBuffers: 59760 kB\nCached: 3530556 kB\nSwapCached: 0 kB\nActive: 307348 kB\nInactive: 3443476 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27020 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 160536 kB\nMapped: 24868 kB\nSlab: 134344 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580316 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615289344}, {buffered_memory,61194240}, {free_memory,27668480}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3292559,0}}, {context_switches,{1118938,0}}, {garbage_collection,{584545,783795789,0}}, {io,{{input,22457808},{output,40597934}}}, {reductions,{242213555,651420}}, {run_queue,0}, {runtime,{46610,150}}]}]}] [stats:error] [2012-03-26 1:58:00] [ns_1@127.0.0.1:<0.8647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8696.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8732.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:00] [ns_1@127.0.0.1:<0.8649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:00] [ns_1@127.0.0.1:<0.8693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:00] [ns_1@127.0.0.1:<0.8708.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:00] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 1:58:00] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 1:58:00] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 1:58:00] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:58:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:58:00] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 1:58:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] 
Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:<0.8718.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:<0.8678.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:<0.8771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [stats:error] [2012-03-26 1:58:05] [ns_1@127.0.0.1:<0.8691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 1:58:05] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 1:58:06] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 01:58:07: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: 
"vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 1:58:06] [ns_1@127.0.0.1:<0.8705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:06] [ns_1@127.0.0.1:<0.8771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:06] [ns_1@127.0.0.1:<0.8697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:07] [ns_1@127.0.0.1:<0.8710.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:07] [ns_1@127.0.0.1:<0.8771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:07] [ns_1@127.0.0.1:<0.8785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:08] [ns_1@127.0.0.1:<0.8716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:08] [ns_1@127.0.0.1:<0.8771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:08] [ns_1@127.0.0.1:<0.8790.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:09] [ns_1@127.0.0.1:<0.8720.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.8771.1>} [ns_server:info] [2012-03-26 1:58:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:58:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8771.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:58:09] [ns_1@127.0.0.1:<0.8797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8732.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8813.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:10] [ns_1@127.0.0.1:<0.8733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:10] [ns_1@127.0.0.1:<0.8803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:11] [ns_1@127.0.0.1:<0.8764.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:11] [ns_1@127.0.0.1:<0.8814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:12] [ns_1@127.0.0.1:<0.8735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:12] [ns_1@127.0.0.1:<0.8820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:13] [ns_1@127.0.0.1:<0.8765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:13] [ns_1@127.0.0.1:<0.8827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:14] [ns_1@127.0.0.1:<0.8738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:14] [ns_1@127.0.0.1:<0.8831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8813.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8843.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:15] [ns_1@127.0.0.1:<0.8766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:15] [ns_1@127.0.0.1:<0.8837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:16] [ns_1@127.0.0.1:<0.8741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:16] [ns_1@127.0.0.1:<0.8844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:17] [ns_1@127.0.0.1:<0.8767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:18] [ns_1@127.0.0.1:<0.8852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:18] [ns_1@127.0.0.1:<0.8743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:19] 
[ns_1@127.0.0.1:<0.8856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:19] [ns_1@127.0.0.1:<0.8773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:19] [ns_1@127.0.0.1:<0.8871.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:20] [ns_1@127.0.0.1:<0.8863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8843.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8877.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:20] [ns_1@127.0.0.1:<0.8745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:20] [ns_1@127.0.0.1:<0.8871.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:21] [ns_1@127.0.0.1:<0.8867.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:21] [ns_1@127.0.0.1:<0.8795.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:21] [ns_1@127.0.0.1:<0.8871.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:22] [ns_1@127.0.0.1:<0.8878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:22] [ns_1@127.0.0.1:<0.8788.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:22] [ns_1@127.0.0.1:<0.8871.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:23] [ns_1@127.0.0.1:<0.8887.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:23] [ns_1@127.0.0.1:<0.8810.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:58:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8871.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 
in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:58:24] [ns_1@127.0.0.1:<0.8894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:24] [ns_1@127.0.0.1:<0.8801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:25] [ns_1@127.0.0.1:<0.8899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8877.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:25] [ns_1@127.0.0.1:<0.8825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:26] [ns_1@127.0.0.1:<0.8907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:26] [ns_1@127.0.0.1:<0.8818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:27] [ns_1@127.0.0.1:<0.8913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:27] [ns_1@127.0.0.1:<0.8835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:28] [ns_1@127.0.0.1:<0.8921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:28] [ns_1@127.0.0.1:<0.8829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:29] [ns_1@127.0.0.1:<0.8926.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:29] [ns_1@127.0.0.1:<0.8850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:29] [ns_1@127.0.0.1:<0.8940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:30] [ns_1@127.0.0.1:<0.8932.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: 
[{pid,<0.8912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8947.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:30] [ns_1@127.0.0.1:<0.8840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:30] [ns_1@127.0.0.1:<0.8940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:31] [ns_1@127.0.0.1:<0.8936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:31] [ns_1@127.0.0.1:<0.8861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:31] [ns_1@127.0.0.1:<0.8940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:32] [ns_1@127.0.0.1:<0.8948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:32] [ns_1@127.0.0.1:<0.8854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:32] [ns_1@127.0.0.1:<0.8940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:33] [ns_1@127.0.0.1:<0.8955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:33] [ns_1@127.0.0.1:<0.8874.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:58:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.8940.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:58:34] [ns_1@127.0.0.1:<0.8962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:34] [ns_1@127.0.0.1:<0.8865.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:35] [ns_1@127.0.0.1:<0.8968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:35] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8947.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.8980.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:35] [ns_1@127.0.0.1:<0.8892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:36] [ns_1@127.0.0.1:<0.8975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:36] [ns_1@127.0.0.1:<0.8885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:37] [ns_1@127.0.0.1:<0.8981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:37] [ns_1@127.0.0.1:<0.8905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:38] [ns_1@127.0.0.1:<0.8991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:38] [ns_1@127.0.0.1:<0.8897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:39] [ns_1@127.0.0.1:<0.8997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:39] [ns_1@127.0.0.1:<0.9008.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:39] [ns_1@127.0.0.1:<0.8919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:40] [ns_1@127.0.0.1:<0.9001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.8980.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9016.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:58:40] [ns_1@127.0.0.1:<0.9008.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:40] [ns_1@127.0.0.1:<0.8909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:41] 
[ns_1@127.0.0.1:<0.9011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:41] [ns_1@127.0.0.1:<0.9008.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:41] [ns_1@127.0.0.1:<0.8930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:42] [ns_1@127.0.0.1:<0.9018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:42] [ns_1@127.0.0.1:<0.9008.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:42] [ns_1@127.0.0.1:<0.8924.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:43] [ns_1@127.0.0.1:<0.9028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:58:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9008.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:58:43] [ns_1@127.0.0.1:<0.8944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:44] [ns_1@127.0.0.1:<0.9033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:44] [ns_1@127.0.0.1:<0.8934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9016.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9048.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:45] [ns_1@127.0.0.1:<0.9040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:45] 
[ns_1@127.0.0.1:<0.8960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:46] [ns_1@127.0.0.1:<0.9045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:46] [ns_1@127.0.0.1:<0.8953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:47] [ns_1@127.0.0.1:<0.9055.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:47] [ns_1@127.0.0.1:<0.8973.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:48] [ns_1@127.0.0.1:<0.9059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:48] [ns_1@127.0.0.1:<0.8966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:49] [ns_1@127.0.0.1:<0.9066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:49] [ns_1@127.0.0.1:<0.9076.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:49] [ns_1@127.0.0.1:<0.8989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9048.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9082.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:50] [ns_1@127.0.0.1:<0.9070.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:50] [ns_1@127.0.0.1:<0.9076.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:50] [ns_1@127.0.0.1:<0.8977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:51] [ns_1@127.0.0.1:<0.9079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:51] [ns_1@127.0.0.1:<0.9076.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:52] [ns_1@127.0.0.1:<0.8999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:52] [ns_1@127.0.0.1:<0.9090.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:52] [ns_1@127.0.0.1:<0.9076.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:53] [ns_1@127.0.0.1:<0.8993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 1:58:53] [ns_1@127.0.0.1:<0.9097.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:58:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9076.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:58:54] [ns_1@127.0.0.1:<0.9013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:54] [ns_1@127.0.0.1:<0.9102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:55] [ns_1@127.0.0.1:<0.9004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:58:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9082.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:58:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9117.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:58:55] [ns_1@127.0.0.1:<0.9110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:56] [ns_1@127.0.0.1:<0.9030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:56] [ns_1@127.0.0.1:<0.9114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:57] [ns_1@127.0.0.1:<0.9022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:57] [ns_1@127.0.0.1:<0.9124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:58] [ns_1@127.0.0.1:<0.9042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:58] [ns_1@127.0.0.1:<0.9129.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:59] 
[ns_1@127.0.0.1:<0.9035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:58:59] [ns_1@127.0.0.1:<0.9135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:58:59] [ns_1@127.0.0.1:<0.9146.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:58:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752335,852277}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38237680}, {processes,10146664}, {processes_used,8521184}, {system,28091016}, {atom,1306681}, {atom_used,1284164}, {binary,677176}, {code,12859877}, {ets,2367328}]}, {system_stats, [{cpu_utilization_rate,25.6857855361596}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3357}, {memory_data,{4040077312,4012408832,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26524 kB\nBuffers: 59848 kB\nCached: 3530720 kB\nSwapCached: 0 kB\nActive: 308700 kB\nInactive: 3443596 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26524 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 161720 kB\nMapped: 24868 kB\nSlab: 134364 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580192 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615457280}, {buffered_memory,61284352}, {free_memory,27160576}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3352589,0}}, {context_switches,{1132123,0}}, {garbage_collection,{591727,795342503,0}}, {io,{{input,22494550},{output,41079455}}}, {reductions,{244832419,634228}}, {run_queue,0}, {runtime,{47260,160}}]}]}] [stats:error] [2012-03-26 1:59:00] [ns_1@127.0.0.1:<0.9057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9117.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:00] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9153.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:00] [ns_1@127.0.0.1:<0.9139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:00] [ns_1@127.0.0.1:<0.9146.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:01] [ns_1@127.0.0.1:<0.9049.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:01] [ns_1@127.0.0.1:<0.9150.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:01] [ns_1@127.0.0.1:<0.9146.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:02] [ns_1@127.0.0.1:<0.9068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:02] [ns_1@127.0.0.1:<0.9159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:02] [ns_1@127.0.0.1:<0.9146.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:03] [ns_1@127.0.0.1:<0.9061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:03] [ns_1@127.0.0.1:<0.9166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:59:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9146.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:59:04] [ns_1@127.0.0.1:<0.9083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:04] [ns_1@127.0.0.1:<0.9172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:05] [ns_1@127.0.0.1:<0.9072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9153.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9186.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:05] [ns_1@127.0.0.1:<0.9179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:06] [ns_1@127.0.0.1:<0.9099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:09] [ns_1@127.0.0.1:<0.9204.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9186.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9208.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 1:59:10] [ns_1@127.0.0.1:<0.9204.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:11] [ns_1@127.0.0.1:<0.9204.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:12] [ns_1@127.0.0.1:<0.9204.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:59:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9204.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 1:59:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9208.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9222.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:16] [ns_1@127.0.0.1:<0.9112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:16] [ns_1@127.0.0.1:<0.9183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:17] [ns_1@127.0.0.1:<0.9092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:17] [ns_1@127.0.0.1:<0.9195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:18] [ns_1@127.0.0.1:<0.9126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:18] [ns_1@127.0.0.1:<0.9231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:19] [ns_1@127.0.0.1:<0.9104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:19] [ns_1@127.0.0.1:<0.9227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:19] [ns_1@127.0.0.1:<0.9248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:20] [ns_1@127.0.0.1:<0.9137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9222.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9254.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:20] [ns_1@127.0.0.1:<0.9242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:20] [ns_1@127.0.0.1:<0.9248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:21] [ns_1@127.0.0.1:<0.9118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:21] [ns_1@127.0.0.1:<0.9248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] 
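The repeating failures above share one shape: gen_server:call/3 is issued against the registered name 'ns_memcached-default' (or {'ns_memcached-default','ns_1@127.0.0.1'}) while no process is currently registered under that name, so both hot_keys_keeper (the topkeys call) and ns_janitor:cleanup/2 (the list_vbuckets_prevstate call) exit with {noproc,{gen_server,call,[...]}}, are restarted by their supervisors, and fail again a few seconds later, as the timestamps show. A minimal sketch of that exit shape, assuming only a stock Erlang/OTP shell and treating 'ns_memcached-default' purely as an illustrative atom with no process registered under it:

    %% gen_server:call/3 to a registered name with no live process exits the
    %% caller with {noproc,{gen_server,call,[Name,Request,Timeout]}}, the same
    %% Reason recorded in the supervisor and crash reports in this log.
    1> catch gen_server:call('ns_memcached-default', topkeys, 30000).
    {'EXIT',{noproc,{gen_server,call,['ns_memcached-default',topkeys,30000]}}}

Once a process is registered under that name again, the same calls return normally instead of exiting, and the supervisor/crash report loop stops.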
[stats:error] [2012-03-26 1:59:21] [ns_1@127.0.0.1:<0.9238.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:22] [ns_1@127.0.0.1:<0.9154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:22] [ns_1@127.0.0.1:<0.9248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:22] [ns_1@127.0.0.1:<0.9262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:23] [ns_1@127.0.0.1:<0.9131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:59:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9248.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:59:23] [ns_1@127.0.0.1:<0.9251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:24] [ns_1@127.0.0.1:<0.9168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:24] [ns_1@127.0.0.1:<0.9274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:25] [ns_1@127.0.0.1:<0.9141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9254.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9289.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:25] [ns_1@127.0.0.1:<0.9269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:26] [ns_1@127.0.0.1:<0.9181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:26] 
[ns_1@127.0.0.1:<0.9286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:27] [ns_1@127.0.0.1:<0.9161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:27] [ns_1@127.0.0.1:<0.9282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:28] [ns_1@127.0.0.1:<0.9229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:28] [ns_1@127.0.0.1:<0.9301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:29] [ns_1@127.0.0.1:<0.9174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:29] [ns_1@127.0.0.1:<0.9315.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:29] [ns_1@127.0.0.1:<0.9296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9289.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:30] [ns_1@127.0.0.1:<0.9240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:30] [ns_1@127.0.0.1:<0.9315.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:30] [ns_1@127.0.0.1:<0.9311.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:31] [ns_1@127.0.0.1:<0.9187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:31] [ns_1@127.0.0.1:<0.9315.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:31] [ns_1@127.0.0.1:<0.9307.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:32] [ns_1@127.0.0.1:<0.9255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:32] [ns_1@127.0.0.1:<0.9315.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:32] [ns_1@127.0.0.1:<0.9330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:33] [ns_1@127.0.0.1:<0.9233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, 
{gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:59:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9315.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:59:33] [ns_1@127.0.0.1:<0.9323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:34] [ns_1@127.0.0.1:<0.9272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:34] [ns_1@127.0.0.1:<0.9343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9355.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:35] [ns_1@127.0.0.1:<0.9244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:35] [ns_1@127.0.0.1:<0.9337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:36] [ns_1@127.0.0.1:<0.9284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:36] [ns_1@127.0.0.1:<0.9356.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:37] [ns_1@127.0.0.1:<0.9267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:38] [ns_1@127.0.0.1:<0.9350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:38] [ns_1@127.0.0.1:<0.9299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:39] [ns_1@127.0.0.1:<0.9370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:39] [ns_1@127.0.0.1:<0.9279.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:39] 
[ns_1@127.0.0.1:<0.9385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:40] [ns_1@127.0.0.1:<0.9366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9355.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9391.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:40] [ns_1@127.0.0.1:<0.9309.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:40] [ns_1@127.0.0.1:<0.9385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:41] [ns_1@127.0.0.1:<0.9381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:41] [ns_1@127.0.0.1:<0.9292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:41] [ns_1@127.0.0.1:<0.9385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:42] [ns_1@127.0.0.1:<0.9376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:42] [ns_1@127.0.0.1:<0.9328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:42] [ns_1@127.0.0.1:<0.9385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:43] [ns_1@127.0.0.1:<0.9399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:43] [ns_1@127.0.0.1:<0.9305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:59:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9385.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: 
running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:59:44] [ns_1@127.0.0.1:<0.9392.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:44] [ns_1@127.0.0.1:<0.9341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:45] [ns_1@127.0.0.1:<0.9412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9391.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9425.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:45] [ns_1@127.0.0.1:<0.9319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:46] [ns_1@127.0.0.1:<0.9407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:46] [ns_1@127.0.0.1:<0.9352.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:47] [ns_1@127.0.0.1:<0.9426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:47] [ns_1@127.0.0.1:<0.9335.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:48] [ns_1@127.0.0.1:<0.9419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:48] [ns_1@127.0.0.1:<0.9368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:49] [ns_1@127.0.0.1:<0.9438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:49] [ns_1@127.0.0.1:<0.9348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:49] [ns_1@127.0.0.1:<0.9453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:50] [ns_1@127.0.0.1:<0.9434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9425.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9459.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:50] [ns_1@127.0.0.1:<0.9379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:50] [ns_1@127.0.0.1:<0.9453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:51] [ns_1@127.0.0.1:<0.9449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:51] [ns_1@127.0.0.1:<0.9364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:51] [ns_1@127.0.0.1:<0.9453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:52] [ns_1@127.0.0.1:<0.9445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:52] [ns_1@127.0.0.1:<0.9397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:52] [ns_1@127.0.0.1:<0.9453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:53] [ns_1@127.0.0.1:<0.9469.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:53] [ns_1@127.0.0.1:<0.9374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 1:59:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9453.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 1:59:54] [ns_1@127.0.0.1:<0.9460.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:54] [ns_1@127.0.0.1:<0.9410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:55] [ns_1@127.0.0.1:<0.9481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 1:59:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9459.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 1:59:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9494.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 1:59:55] [ns_1@127.0.0.1:<0.9388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:56] [ns_1@127.0.0.1:<0.9476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:56] [ns_1@127.0.0.1:<0.9422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:57] [ns_1@127.0.0.1:<0.9495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:57] [ns_1@127.0.0.1:<0.9405.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:58] [ns_1@127.0.0.1:<0.9489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:58] [ns_1@127.0.0.1:<0.9436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 1:59:59] [ns_1@127.0.0.1:<0.9510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 1:59:59] [ns_1@127.0.0.1:<0.9536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 1:59:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752395,878250}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38144096}, {processes,10070704}, {processes_used,8445224}, {system,28073392}, {atom,1306681}, {atom_used,1284164}, {binary,682608}, {code,12859877}, {ets,2337048}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3417}, {memory_data,{4040077312,4013043712,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26028 kB\nBuffers: 59908 kB\nCached: 3530880 kB\nSwapCached: 0 kB\nActive: 308924 kB\nInactive: 3443668 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26028 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 161800 kB\nMapped: 24868 kB\nSlab: 134372 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580192 kB\nVmallocTotal: 34359738367 
kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615621120}, {buffered_memory,61345792}, {free_memory,26652672}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3412614,0}}, {context_switches,{1144264,0}}, {garbage_collection,{598659,805221536,0}}, {io,{{input,22525112},{output,41482437}}}, {reductions,{247215180,667726}}, {run_queue,0}, {runtime,{47780,130}}]}]}] [stats:error] [2012-03-26 1:59:59] [ns_1@127.0.0.1:<0.9417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:00] [ns_1@127.0.0.1:<0.9503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9494.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9545.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:00:00] [ns_1@127.0.0.1:<0.9536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:00] [ns_1@127.0.0.1:<0.9447.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:01] [ns_1@127.0.0.1:<0.9539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:01] [ns_1@127.0.0.1:<0.9536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:01] [ns_1@127.0.0.1:<0.9432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:02] [ns_1@127.0.0.1:<0.9514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:02] [ns_1@127.0.0.1:<0.9536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:02] [ns_1@127.0.0.1:<0.9467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:03] [ns_1@127.0.0.1:<0.9556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:00:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9536.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:00:03] [ns_1@127.0.0.1:<0.9443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:04] [ns_1@127.0.0.1:<0.9547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:04] [ns_1@127.0.0.1:<0.9479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:05] [ns_1@127.0.0.1:<0.9569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9545.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9578.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:05] [ns_1@127.0.0.1:<0.9456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:06] [ns_1@127.0.0.1:<0.9562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:06] [ns_1@127.0.0.1:<0.9491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:07] [ns_1@127.0.0.1:<0.9585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:07] [ns_1@127.0.0.1:<0.9474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:08] [ns_1@127.0.0.1:<0.9573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:08] [ns_1@127.0.0.1:<0.9506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:09] [ns_1@127.0.0.1:<0.9595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:09] [ns_1@127.0.0.1:<0.9608.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:09] [ns_1@127.0.0.1:<0.9487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[error_logger:error] [2012-03-26 2:00:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9578.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9614.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:10] [ns_1@127.0.0.1:<0.9589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:10] [ns_1@127.0.0.1:<0.9608.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:10] [ns_1@127.0.0.1:<0.9516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:11] [ns_1@127.0.0.1:<0.9611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:11] [ns_1@127.0.0.1:<0.9608.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:11] [ns_1@127.0.0.1:<0.9501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:12] [ns_1@127.0.0.1:<0.9600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:12] [ns_1@127.0.0.1:<0.9608.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:13] [ns_1@127.0.0.1:<0.9551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:13] [ns_1@127.0.0.1:<0.9628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:00:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9608.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:00:14] [ns_1@127.0.0.1:<0.9512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:14] 
[ns_1@127.0.0.1:<0.9620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:15] [ns_1@127.0.0.1:<0.9564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9614.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9648.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:15] [ns_1@127.0.0.1:<0.9640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:16] [ns_1@127.0.0.1:<0.9542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:16] [ns_1@127.0.0.1:<0.9633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:16] [ns_1@127.0.0.1:<0.9645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:16] [ns_1@127.0.0.1:<0.9575.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:17] [ns_1@127.0.0.1:<0.9591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:17] [ns_1@127.0.0.1:<0.9655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:18] [ns_1@127.0.0.1:<0.9558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:18] [ns_1@127.0.0.1:<0.9663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:19] [ns_1@127.0.0.1:<0.9602.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:19] [ns_1@127.0.0.1:<0.9670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:19] [ns_1@127.0.0.1:<0.9680.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:20] [ns_1@127.0.0.1:<0.9571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9648.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9686.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:20] [ns_1@127.0.0.1:<0.9674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:20] [ns_1@127.0.0.1:<0.9680.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:21] [ns_1@127.0.0.1:<0.9622.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:21] [ns_1@127.0.0.1:<0.9683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:21] [ns_1@127.0.0.1:<0.9680.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:22] [ns_1@127.0.0.1:<0.9587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:22] [ns_1@127.0.0.1:<0.9694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:22] [ns_1@127.0.0.1:<0.9680.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:23] [ns_1@127.0.0.1:<0.9635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:23] [ns_1@127.0.0.1:<0.9701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:00:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9680.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:00:24] [ns_1@127.0.0.1:<0.9597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:24] [ns_1@127.0.0.1:<0.9706.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:25] [ns_1@127.0.0.1:<0.9649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: 
[{pid,<0.9686.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9721.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:25] [ns_1@127.0.0.1:<0.9714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:26] [ns_1@127.0.0.1:<0.9615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:26] [ns_1@127.0.0.1:<0.9718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:27] [ns_1@127.0.0.1:<0.9665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:27] [ns_1@127.0.0.1:<0.9728.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:28] [ns_1@127.0.0.1:<0.9630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:28] [ns_1@127.0.0.1:<0.9733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:29] [ns_1@127.0.0.1:<0.9676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:29] [ns_1@127.0.0.1:<0.9739.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:29] [ns_1@127.0.0.1:<0.9749.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:30] [ns_1@127.0.0.1:<0.9642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9721.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9756.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:30] [ns_1@127.0.0.1:<0.9743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:30] [ns_1@127.0.0.1:<0.9749.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:31] [ns_1@127.0.0.1:<0.9696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:31] [ns_1@127.0.0.1:<0.9749.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:31] 
[ns_1@127.0.0.1:<0.9753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:32] [ns_1@127.0.0.1:<0.9657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:32] [ns_1@127.0.0.1:<0.9749.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:32] [ns_1@127.0.0.1:<0.9762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:33] [ns_1@127.0.0.1:<0.9708.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:00:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9749.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:00:33] [ns_1@127.0.0.1:<0.9769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:34] [ns_1@127.0.0.1:<0.9659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:34] [ns_1@127.0.0.1:<0.9775.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:35] [ns_1@127.0.0.1:<0.9722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9756.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9789.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:35] [ns_1@127.0.0.1:<0.9782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:36] [ns_1@127.0.0.1:<0.9661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:36] 
[ns_1@127.0.0.1:<0.9786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:37] [ns_1@127.0.0.1:<0.9735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:37] [ns_1@127.0.0.1:<0.9798.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:38] [ns_1@127.0.0.1:<0.9672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:38] [ns_1@127.0.0.1:<0.9802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:39] [ns_1@127.0.0.1:<0.9745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:39] [ns_1@127.0.0.1:<0.9817.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:39] [ns_1@127.0.0.1:<0.9808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9789.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9823.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:40] [ns_1@127.0.0.1:<0.9687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:40] [ns_1@127.0.0.1:<0.9817.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:40] [ns_1@127.0.0.1:<0.9813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:41] [ns_1@127.0.0.1:<0.9767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:41] [ns_1@127.0.0.1:<0.9817.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:41] [ns_1@127.0.0.1:<0.9824.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:42] [ns_1@127.0.0.1:<0.9703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:42] [ns_1@127.0.0.1:<0.9817.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:42] [ns_1@127.0.0.1:<0.9831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:43] [ns_1@127.0.0.1:<0.9780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, 
{gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:00:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9817.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:00:43] [ns_1@127.0.0.1:<0.9839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:44] [ns_1@127.0.0.1:<0.9716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:44] [ns_1@127.0.0.1:<0.9844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9823.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9857.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:45] [ns_1@127.0.0.1:<0.9793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:45] [ns_1@127.0.0.1:<0.9851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:46] [ns_1@127.0.0.1:<0.9730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:47] [ns_1@127.0.0.1:<0.9858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:47] [ns_1@127.0.0.1:<0.9806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:48] [ns_1@127.0.0.1:<0.9866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:48] [ns_1@127.0.0.1:<0.9741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:49] [ns_1@127.0.0.1:<0.9870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:49] [ns_1@127.0.0.1:<0.9820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:49] 
[ns_1@127.0.0.1:<0.9885.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:50] [ns_1@127.0.0.1:<0.9877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9857.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9891.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:50] [ns_1@127.0.0.1:<0.9757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:50] [ns_1@127.0.0.1:<0.9885.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:51] [ns_1@127.0.0.1:<0.9881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:51] [ns_1@127.0.0.1:<0.9837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:51] [ns_1@127.0.0.1:<0.9885.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:52] [ns_1@127.0.0.1:<0.9892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:52] [ns_1@127.0.0.1:<0.9772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:52] [ns_1@127.0.0.1:<0.9885.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:53] [ns_1@127.0.0.1:<0.9901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:53] [ns_1@127.0.0.1:<0.9849.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:00:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9885.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: 
running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:00:54] [ns_1@127.0.0.1:<0.9908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:54] [ns_1@127.0.0.1:<0.9784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:55] [ns_1@127.0.0.1:<0.9913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:00:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9891.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:00:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9926.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:00:55] [ns_1@127.0.0.1:<0.9864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:56] [ns_1@127.0.0.1:<0.9921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:56] [ns_1@127.0.0.1:<0.9800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:57] [ns_1@127.0.0.1:<0.9927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:57] [ns_1@127.0.0.1:<0.9875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:58] [ns_1@127.0.0.1:<0.9935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:58] [ns_1@127.0.0.1:<0.9811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:59] [ns_1@127.0.0.1:<0.9940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:00:59] [ns_1@127.0.0.1:<0.9888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:00:59] [ns_1@127.0.0.1:<0.9955.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:00:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752455,907292}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38443328}, {processes,10329272}, {processes_used,8703792}, {system,28114056}, {atom,1306681}, {atom_used,1284164}, {binary,685472}, {code,12859877}, {ets,2368360}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, 
{ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3477}, {memory_data,{4040077312,4013551616,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25532 kB\nBuffers: 60016 kB\nCached: 3531024 kB\nSwapCached: 0 kB\nActive: 309012 kB\nInactive: 3443896 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25532 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 161860 kB\nMapped: 24868 kB\nSlab: 134384 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582328 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615768576}, {buffered_memory,61456384}, {free_memory,26144768}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3472643,0}}, {context_switches,{1157599,0}}, {garbage_collection,{605874,816498012,0}}, {io,{{input,22801727},{output,42147102}}}, {reductions,{249846024,626719}}, {run_queue,0}, {runtime,{48390,150}}]}]}] [stats:error] [2012-03-26 2:01:00] [ns_1@127.0.0.1:<0.9946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9926.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9962.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:00] [ns_1@127.0.0.1:<0.9829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:00] [ns_1@127.0.0.1:<0.9955.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:01] [ns_1@127.0.0.1:<0.9950.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:01] [ns_1@127.0.0.1:<0.9906.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:01] [ns_1@127.0.0.1:<0.9955.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:02] [ns_1@127.0.0.1:<0.9963.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:02] [ns_1@127.0.0.1:<0.9842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:02] [ns_1@127.0.0.1:<0.9955.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:03] [ns_1@127.0.0.1:<0.9970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:03] [ns_1@127.0.0.1:<0.9919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:01:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.9955.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:01:04] [ns_1@127.0.0.1:<0.9977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:04] [ns_1@127.0.0.1:<0.9854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:05] [ns_1@127.0.0.1:<0.9983.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9962.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.9995.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:05] [ns_1@127.0.0.1:<0.9933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:06] [ns_1@127.0.0.1:<0.9990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:06] [ns_1@127.0.0.1:<0.9868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:07] [ns_1@127.0.0.1:<0.9998.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:01:07] [ns_1@127.0.0.1:<0.9944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:08] [ns_1@127.0.0.1:<0.10006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:08] [ns_1@127.0.0.1:<0.9879.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:09] [ns_1@127.0.0.1:<0.10012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:09] [ns_1@127.0.0.1:<0.10025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:09] [ns_1@127.0.0.1:<0.9959.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:10] [ns_1@127.0.0.1:<0.10017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.9995.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10033.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:01:10] [ns_1@127.0.0.1:<0.10025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:10] [ns_1@127.0.0.1:<0.9899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:11] [ns_1@127.0.0.1:<0.10028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:11] [ns_1@127.0.0.1:<0.10025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:11] [ns_1@127.0.0.1:<0.9975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:12] [ns_1@127.0.0.1:<0.10035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:12] [ns_1@127.0.0.1:<0.10025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:12] [ns_1@127.0.0.1:<0.9911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:13] [ns_1@127.0.0.1:<0.10045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:01:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10025.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:01:13] [ns_1@127.0.0.1:<0.9988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:14] [ns_1@127.0.0.1:<0.10050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:14] [ns_1@127.0.0.1:<0.9923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10033.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10065.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:15] [ns_1@127.0.0.1:<0.10057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:15] [ns_1@127.0.0.1:<0.10004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:16] [ns_1@127.0.0.1:<0.10062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:16] [ns_1@127.0.0.1:<0.9938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:16] [ns_1@127.0.0.1:<0.9948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:16] [ns_1@127.0.0.1:<0.9968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:17] [ns_1@127.0.0.1:<0.10072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:17] [ns_1@127.0.0.1:<0.10014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:18] [ns_1@127.0.0.1:<0.10076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:18] [ns_1@127.0.0.1:<0.9981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:19] [ns_1@127.0.0.1:<0.10087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:19] [ns_1@127.0.0.1:<0.10097.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:19] [ns_1@127.0.0.1:<0.10030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10065.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10103.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:20] [ns_1@127.0.0.1:<0.10078.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:20] [ns_1@127.0.0.1:<0.10097.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:21] [ns_1@127.0.0.1:<0.9992.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:21] [ns_1@127.0.0.1:<0.10100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:21] [ns_1@127.0.0.1:<0.10097.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:22] [ns_1@127.0.0.1:<0.10047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:22] [ns_1@127.0.0.1:<0.10080.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:22] [ns_1@127.0.0.1:<0.10097.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:23] [ns_1@127.0.0.1:<0.10008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:23] [ns_1@127.0.0.1:<0.10118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:01:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10097.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] 
dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:01:24] [ns_1@127.0.0.1:<0.10059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:24] [ns_1@127.0.0.1:<0.10091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:25] [ns_1@127.0.0.1:<0.10019.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10103.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10138.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:25] [ns_1@127.0.0.1:<0.10131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:26] [ns_1@127.0.0.1:<0.10074.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:26] [ns_1@127.0.0.1:<0.10111.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:27] [ns_1@127.0.0.1:<0.10039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:27] [ns_1@127.0.0.1:<0.10145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:28] [ns_1@127.0.0.1:<0.10089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:28] [ns_1@127.0.0.1:<0.10123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:29] [ns_1@127.0.0.1:<0.10052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:29] [ns_1@127.0.0.1:<0.10156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:29] [ns_1@127.0.0.1:<0.10166.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:30] [ns_1@127.0.0.1:<0.10104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10138.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:30] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10173.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:30] [ns_1@127.0.0.1:<0.10135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:30] [ns_1@127.0.0.1:<0.10166.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:31] [ns_1@127.0.0.1:<0.10066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:31] [ns_1@127.0.0.1:<0.10170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:31] [ns_1@127.0.0.1:<0.10166.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:32] [ns_1@127.0.0.1:<0.10120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:32] [ns_1@127.0.0.1:<0.10150.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:32] [ns_1@127.0.0.1:<0.10166.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:33] [ns_1@127.0.0.1:<0.10082.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:33] [ns_1@127.0.0.1:<0.10186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:01:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10166.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:01:34] [ns_1@127.0.0.1:<0.10133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:34] [ns_1@127.0.0.1:<0.10160.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:35] [ns_1@127.0.0.1:<0.10093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10173.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10206.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:35] [ns_1@127.0.0.1:<0.10199.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:36] [ns_1@127.0.0.1:<0.10147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:36] [ns_1@127.0.0.1:<0.10179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:37] [ns_1@127.0.0.1:<0.10113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:37] [ns_1@127.0.0.1:<0.10215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:38] [ns_1@127.0.0.1:<0.10158.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:38] [ns_1@127.0.0.1:<0.10192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:39] [ns_1@127.0.0.1:<0.10125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:39] [ns_1@127.0.0.1:<0.10225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:39] [ns_1@127.0.0.1:<0.10236.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:40] [ns_1@127.0.0.1:<0.10174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10206.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10242.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:01:40] [ns_1@127.0.0.1:<0.10236.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:40] [ns_1@127.0.0.1:<0.10203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:41] [ns_1@127.0.0.1:<0.10139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:41] 
[ns_1@127.0.0.1:<0.10236.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:41] [ns_1@127.0.0.1:<0.10239.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:42] [ns_1@127.0.0.1:<0.10188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:42] [ns_1@127.0.0.1:<0.10236.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:42] [ns_1@127.0.0.1:<0.10219.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:43] [ns_1@127.0.0.1:<0.10152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:01:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10236.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:01:43] [ns_1@127.0.0.1:<0.10256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:44] [ns_1@127.0.0.1:<0.10201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:44] [ns_1@127.0.0.1:<0.10230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:45] [ns_1@127.0.0.1:<0.10162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10242.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10276.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:45] [ns_1@127.0.0.1:<0.10268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:46] 
[ns_1@127.0.0.1:<0.10217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:46] [ns_1@127.0.0.1:<0.10248.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:47] [ns_1@127.0.0.1:<0.10181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:47] [ns_1@127.0.0.1:<0.10283.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:48] [ns_1@127.0.0.1:<0.10227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:48] [ns_1@127.0.0.1:<0.10261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:49] [ns_1@127.0.0.1:<0.10194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:49] [ns_1@127.0.0.1:<0.10302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:49] [ns_1@127.0.0.1:<0.10294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10276.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10308.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:50] [ns_1@127.0.0.1:<0.10244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:50] [ns_1@127.0.0.1:<0.10302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:50] [ns_1@127.0.0.1:<0.10273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:51] [ns_1@127.0.0.1:<0.10207.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:51] [ns_1@127.0.0.1:<0.10302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:51] [ns_1@127.0.0.1:<0.10309.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:52] [ns_1@127.0.0.1:<0.10259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:52] [ns_1@127.0.0.1:<0.10302.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:52] [ns_1@127.0.0.1:<0.10287.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:53] [ns_1@127.0.0.1:<0.10221.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:01:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10302.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:01:53] [ns_1@127.0.0.1:<0.10325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:54] [ns_1@127.0.0.1:<0.10271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:54] [ns_1@127.0.0.1:<0.10298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:01:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10308.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:01:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10343.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:01:55] [ns_1@127.0.0.1:<0.10232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:56] [ns_1@127.0.0.1:<0.10338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:56] [ns_1@127.0.0.1:<0.10285.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:57] [ns_1@127.0.0.1:<0.10318.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:57] [ns_1@127.0.0.1:<0.10253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:58] [ns_1@127.0.0.1:<0.10352.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:58] [ns_1@127.0.0.1:<0.10296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:01:59] [ns_1@127.0.0.1:<0.10330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:01:59] [ns_1@127.0.0.1:<0.10266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:01:59] [ns_1@127.0.0.1:<0.10372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:01:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752515,931297}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38251464}, {processes,10168792}, {processes_used,8543312}, {system,28082672}, {atom,1306681}, {atom_used,1284164}, {binary,674640}, {code,12859877}, {ets,2340616}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3537}, {memory_data,{4040077312,4013932544,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24292 kB\nBuffers: 60084 kB\nCached: 3531180 kB\nSwapCached: 0 kB\nActive: 309580 kB\nInactive: 3443920 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24292 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 68 kB\nWriteback: 0 kB\nAnonPages: 162108 kB\nMapped: 24868 kB\nSlab: 134360 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 584436 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615928320}, {buffered_memory,61526016}, {free_memory,24875008}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3532667,0}}, {context_switches,{1170648,0}}, {garbage_collection,{613208,827729080,0}}, {io,{{input,22832334},{output,42589691}}}, {reductions,{252472730,624602}}, {run_queue,0}, {runtime,{48980,140}}]}]}] [stats:error] [2012-03-26 2:02:00] [ns_1@127.0.0.1:<0.10363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10343.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10379.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:00] [ns_1@127.0.0.1:<0.10316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:00] [ns_1@127.0.0.1:<0.10372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:01] [ns_1@127.0.0.1:<0.10344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:01] [ns_1@127.0.0.1:<0.10279.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:01] [ns_1@127.0.0.1:<0.10372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:02] [ns_1@127.0.0.1:<0.10380.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:02] [ns_1@127.0.0.1:<0.10328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:02] [ns_1@127.0.0.1:<0.10372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:03] [ns_1@127.0.0.1:<0.10357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:03] [ns_1@127.0.0.1:<0.10292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:02:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10372.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:02:04] [ns_1@127.0.0.1:<0.10394.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:04] [ns_1@127.0.0.1:<0.10340.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:05] [ns_1@127.0.0.1:<0.10367.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10379.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10412.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:05] [ns_1@127.0.0.1:<0.10305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:06] [ns_1@127.0.0.1:<0.10407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:06] [ns_1@127.0.0.1:<0.10355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:07] [ns_1@127.0.0.1:<0.10387.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:07] [ns_1@127.0.0.1:<0.10323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:08] [ns_1@127.0.0.1:<0.10423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:08] [ns_1@127.0.0.1:<0.10365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:09] [ns_1@127.0.0.1:<0.10400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:09] [ns_1@127.0.0.1:<0.10336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:09] [ns_1@127.0.0.1:<0.10444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:10] [ns_1@127.0.0.1:<0.10433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10412.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10450.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:10] [ns_1@127.0.0.1:<0.10385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:10] [ns_1@127.0.0.1:<0.10444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:11] [ns_1@127.0.0.1:<0.10413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:11] [ns_1@127.0.0.1:<0.10350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:11] 
[ns_1@127.0.0.1:<0.10444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:12] [ns_1@127.0.0.1:<0.10451.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:12] [ns_1@127.0.0.1:<0.10398.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:12] [ns_1@127.0.0.1:<0.10444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:13] [ns_1@127.0.0.1:<0.10427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:13] [ns_1@127.0.0.1:<0.10361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:02:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10444.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:02:14] [ns_1@127.0.0.1:<0.10466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:14] [ns_1@127.0.0.1:<0.10409.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:15] [ns_1@127.0.0.1:<0.10438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10450.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:15] [ns_1@127.0.0.1:<0.10376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:16] [ns_1@127.0.0.1:<0.10478.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:16] 
[ns_1@127.0.0.1:<0.10425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:17] [ns_1@127.0.0.1:<0.10458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:17] [ns_1@127.0.0.1:<0.10471.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:17] [ns_1@127.0.0.1:<0.10487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:17] [ns_1@127.0.0.1:<0.10392.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:18] [ns_1@127.0.0.1:<0.10493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:18] [ns_1@127.0.0.1:<0.10436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:19] [ns_1@127.0.0.1:<0.10504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:19] [ns_1@127.0.0.1:<0.10514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:19] [ns_1@127.0.0.1:<0.10405.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:20] [ns_1@127.0.0.1:<0.10508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10522.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:02:20] [ns_1@127.0.0.1:<0.10514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:20] [ns_1@127.0.0.1:<0.10456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:21] [ns_1@127.0.0.1:<0.10517.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:21] [ns_1@127.0.0.1:<0.10514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:21] [ns_1@127.0.0.1:<0.10421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:22] [ns_1@127.0.0.1:<0.10525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:22] [ns_1@127.0.0.1:<0.10514.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:22] [ns_1@127.0.0.1:<0.10469.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:23] [ns_1@127.0.0.1:<0.10535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:02:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10514.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:02:23] [ns_1@127.0.0.1:<0.10431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:24] [ns_1@127.0.0.1:<0.10540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:24] [ns_1@127.0.0.1:<0.10481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10522.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10555.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:25] [ns_1@127.0.0.1:<0.10548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:25] [ns_1@127.0.0.1:<0.10447.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:26] [ns_1@127.0.0.1:<0.10552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:26] [ns_1@127.0.0.1:<0.10495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:27] [ns_1@127.0.0.1:<0.10562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:27] [ns_1@127.0.0.1:<0.10464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:28] [ns_1@127.0.0.1:<0.10567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:02:28] [ns_1@127.0.0.1:<0.10497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:29] [ns_1@127.0.0.1:<0.10573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:29] [ns_1@127.0.0.1:<0.10583.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:30] [ns_1@127.0.0.1:<0.10476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10555.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10590.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:30] [ns_1@127.0.0.1:<0.10577.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:30] [ns_1@127.0.0.1:<0.10583.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:31] [ns_1@127.0.0.1:<0.10499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:31] [ns_1@127.0.0.1:<0.10587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:31] [ns_1@127.0.0.1:<0.10583.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:32] [ns_1@127.0.0.1:<0.10491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:32] [ns_1@127.0.0.1:<0.10596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:32] [ns_1@127.0.0.1:<0.10583.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:33] [ns_1@127.0.0.1:<0.10510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:33] [ns_1@127.0.0.1:<0.10603.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:02:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10583.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:02:34] [ns_1@127.0.0.1:<0.10506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:34] [ns_1@127.0.0.1:<0.10609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:35] [ns_1@127.0.0.1:<0.10530.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10590.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10623.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:35] [ns_1@127.0.0.1:<0.10616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:36] [ns_1@127.0.0.1:<0.10519.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:36] [ns_1@127.0.0.1:<0.10620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:37] [ns_1@127.0.0.1:<0.10542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:37] [ns_1@127.0.0.1:<0.10632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:38] [ns_1@127.0.0.1:<0.10537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:38] [ns_1@127.0.0.1:<0.10636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:39] [ns_1@127.0.0.1:<0.10556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:39] [ns_1@127.0.0.1:<0.10642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:39] [ns_1@127.0.0.1:<0.10653.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:40] [ns_1@127.0.0.1:<0.10550.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10623.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10659.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:40] [ns_1@127.0.0.1:<0.10647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:40] [ns_1@127.0.0.1:<0.10653.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:41] [ns_1@127.0.0.1:<0.10569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:41] [ns_1@127.0.0.1:<0.10656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:41] [ns_1@127.0.0.1:<0.10653.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:42] [ns_1@127.0.0.1:<0.10564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:42] [ns_1@127.0.0.1:<0.10665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:42] [ns_1@127.0.0.1:<0.10653.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:43] [ns_1@127.0.0.1:<0.10579.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:43] [ns_1@127.0.0.1:<0.10673.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:02:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10653.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:02:44] [ns_1@127.0.0.1:<0.10575.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:44] [ns_1@127.0.0.1:<0.10678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:45] [ns_1@127.0.0.1:<0.10598.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10659.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10693.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:02:45] [ns_1@127.0.0.1:<0.10685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:46] [ns_1@127.0.0.1:<0.10591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:46] [ns_1@127.0.0.1:<0.10690.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:47] [ns_1@127.0.0.1:<0.10611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:47] [ns_1@127.0.0.1:<0.10700.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:48] [ns_1@127.0.0.1:<0.10605.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:48] [ns_1@127.0.0.1:<0.10704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:49] [ns_1@127.0.0.1:<0.10624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:49] [ns_1@127.0.0.1:<0.10711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:49] [ns_1@127.0.0.1:<0.10721.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:50] [ns_1@127.0.0.1:<0.10618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10693.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10727.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:02:50] [ns_1@127.0.0.1:<0.10721.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:50] [ns_1@127.0.0.1:<0.10715.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:51] [ns_1@127.0.0.1:<0.10638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:51] [ns_1@127.0.0.1:<0.10721.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:51] [ns_1@127.0.0.1:<0.10724.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:52] [ns_1@127.0.0.1:<0.10634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:52] [ns_1@127.0.0.1:<0.10721.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:52] [ns_1@127.0.0.1:<0.10735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:53] [ns_1@127.0.0.1:<0.10649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:02:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10721.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:02:53] [ns_1@127.0.0.1:<0.10742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:54] [ns_1@127.0.0.1:<0.10644.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:54] [ns_1@127.0.0.1:<0.10747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:55] [ns_1@127.0.0.1:<0.10667.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:02:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10727.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:02:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10762.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 2:02:55] [ns_1@127.0.0.1:<0.10755.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:56] [ns_1@127.0.0.1:<0.10660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:56] [ns_1@127.0.0.1:<0.10759.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:57] [ns_1@127.0.0.1:<0.10680.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:57] [ns_1@127.0.0.1:<0.10769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:58] [ns_1@127.0.0.1:<0.10675.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:58] [ns_1@127.0.0.1:<0.10774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:02:59] [ns_1@127.0.0.1:<0.10694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:02:59] [ns_1@127.0.0.1:<0.10802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:02:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752575,959376}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38250600}, {processes,10131400}, {processes_used,8505920}, {system,28119200}, {atom,1306681}, {atom_used,1284164}, {binary,675816}, {code,12859877}, {ets,2369192}]}, {system_stats, [{cpu_utilization_rate,25.43640897755611}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3597}, {memory_data,{4040077312,4015202304,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24416 kB\nBuffers: 60144 kB\nCached: 3531340 kB\nSwapCached: 0 kB\nActive: 309404 kB\nInactive: 3444012 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24416 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 161936 kB\nMapped: 24868 kB\nSlab: 134364 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 584436 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3616092160}, {buffered_memory,61587456}, {free_memory,25001984}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, 
{index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3592695,0}}, {context_switches,{1183691,0}}, {garbage_collection,{620535,839001210,0}}, {io,{{input,22862923},{output,43030662}}}, {reductions,{255089618,633374}}, {run_queue,0}, {runtime,{49570,130}}]}]}] [stats:error] [2012-03-26 2:02:59] [ns_1@127.0.0.1:<0.10780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:00] [ns_1@127.0.0.1:<0.10687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10762.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10811.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:03:00] [ns_1@127.0.0.1:<0.10802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:00] [ns_1@127.0.0.1:<0.10784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:01] [ns_1@127.0.0.1:<0.10706.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:01] [ns_1@127.0.0.1:<0.10802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:01] [ns_1@127.0.0.1:<0.10808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:02] [ns_1@127.0.0.1:<0.10702.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:02] [ns_1@127.0.0.1:<0.10802.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:02] [ns_1@127.0.0.1:<0.10817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:03] [ns_1@127.0.0.1:<0.10717.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:03:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10802.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 
ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:03:03] [ns_1@127.0.0.1:<0.10824.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:04] [ns_1@127.0.0.1:<0.10713.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:04] [ns_1@127.0.0.1:<0.10830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10811.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10842.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:05] [ns_1@127.0.0.1:<0.10740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:06] [ns_1@127.0.0.1:<0.10837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:06] [ns_1@127.0.0.1:<0.10728.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:07] [ns_1@127.0.0.1:<0.10843.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:07] [ns_1@127.0.0.1:<0.10753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:08] [ns_1@127.0.0.1:<0.10853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:08] [ns_1@127.0.0.1:<0.10745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:09] [ns_1@127.0.0.1:<0.10857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:09] [ns_1@127.0.0.1:<0.10765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:09] [ns_1@127.0.0.1:<0.10874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:10] [ns_1@127.0.0.1:<0.10863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10842.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:10] [ns_1@127.0.0.1:<0.10757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:10] [ns_1@127.0.0.1:<0.10874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:11] [ns_1@127.0.0.1:<0.10868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:11] [ns_1@127.0.0.1:<0.10778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:11] [ns_1@127.0.0.1:<0.10874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:12] [ns_1@127.0.0.1:<0.10881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:12] [ns_1@127.0.0.1:<0.10772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:12] [ns_1@127.0.0.1:<0.10874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:13] [ns_1@127.0.0.1:<0.10888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:13] [ns_1@127.0.0.1:<0.10806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:03:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10874.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:03:14] [ns_1@127.0.0.1:<0.10896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:14] [ns_1@127.0.0.1:<0.10782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:15] [ns_1@127.0.0.1:<0.10901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10914.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:15] [ns_1@127.0.0.1:<0.10822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:16] [ns_1@127.0.0.1:<0.10908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:16] [ns_1@127.0.0.1:<0.10813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:17] [ns_1@127.0.0.1:<0.10915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:17] [ns_1@127.0.0.1:<0.10835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:17] [ns_1@127.0.0.1:<0.10851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:17] [ns_1@127.0.0.1:<0.10861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:17] [ns_1@127.0.0.1:<0.10877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:17] [ns_1@127.0.0.1:<0.10894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:18] [ns_1@127.0.0.1:<0.10923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:18] [ns_1@127.0.0.1:<0.10828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:19] [ns_1@127.0.0.1:<0.10927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:19] [ns_1@127.0.0.1:<0.10906.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:19] [ns_1@127.0.0.1:<0.10950.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:20] [ns_1@127.0.0.1:<0.10942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10914.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,menelaus_sup} started: [{pid,<0.10956.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:20] [ns_1@127.0.0.1:<0.10839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:20] [ns_1@127.0.0.1:<0.10950.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:21] [ns_1@127.0.0.1:<0.10929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:21] [ns_1@127.0.0.1:<0.10921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:21] [ns_1@127.0.0.1:<0.10950.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:22] [ns_1@127.0.0.1:<0.10957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:22] [ns_1@127.0.0.1:<0.10855.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:22] [ns_1@127.0.0.1:<0.10950.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:23] [ns_1@127.0.0.1:<0.10931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:23] [ns_1@127.0.0.1:<0.10940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:03:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.10950.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:03:24] [ns_1@127.0.0.1:<0.10973.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:24] [ns_1@127.0.0.1:<0.10866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:25] [ns_1@127.0.0.1:<0.10933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10956.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.10991.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:25] [ns_1@127.0.0.1:<0.10953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:26] [ns_1@127.0.0.1:<0.10986.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:26] [ns_1@127.0.0.1:<0.10886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:27] [ns_1@127.0.0.1:<0.10935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:27] [ns_1@127.0.0.1:<0.10971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:28] [ns_1@127.0.0.1:<0.11000.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:28] [ns_1@127.0.0.1:<0.10899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:29] [ns_1@127.0.0.1:<0.10946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:29] [ns_1@127.0.0.1:<0.11017.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:29] [ns_1@127.0.0.1:<0.10984.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:30] [ns_1@127.0.0.1:<0.11011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.10991.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11026.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:03:30] [ns_1@127.0.0.1:<0.11017.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:30] [ns_1@127.0.0.1:<0.10911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:31] [ns_1@127.0.0.1:<0.10966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:31] [ns_1@127.0.0.1:<0.11017.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:31] 
[ns_1@127.0.0.1:<0.10998.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:32] [ns_1@127.0.0.1:<0.11028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:32] [ns_1@127.0.0.1:<0.11017.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:32] [ns_1@127.0.0.1:<0.10925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:33] [ns_1@127.0.0.1:<0.10978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:03:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11017.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:03:33] [ns_1@127.0.0.1:<0.11009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:34] [ns_1@127.0.0.1:<0.11043.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:34] [ns_1@127.0.0.1:<0.10944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:35] [ns_1@127.0.0.1:<0.10994.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11026.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11059.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:35] [ns_1@127.0.0.1:<0.11023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:36] [ns_1@127.0.0.1:<0.11054.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:36] 
[ns_1@127.0.0.1:<0.10964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:37] [ns_1@127.0.0.1:<0.11007.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:37] [ns_1@127.0.0.1:<0.11039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:38] [ns_1@127.0.0.1:<0.11070.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:38] [ns_1@127.0.0.1:<0.10976.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:39] [ns_1@127.0.0.1:<0.11021.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:39] [ns_1@127.0.0.1:<0.11087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:40] [ns_1@127.0.0.1:<0.11052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11059.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11093.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:40] [ns_1@127.0.0.1:<0.11081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:40] [ns_1@127.0.0.1:<0.11087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:41] [ns_1@127.0.0.1:<0.10988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:41] [ns_1@127.0.0.1:<0.11037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:41] [ns_1@127.0.0.1:<0.11087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:42] [ns_1@127.0.0.1:<0.11068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:42] [ns_1@127.0.0.1:<0.11099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:42] [ns_1@127.0.0.1:<0.11087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:43] [ns_1@127.0.0.1:<0.11003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:43] [ns_1@127.0.0.1:<0.11050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:03:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11087.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:03:44] [ns_1@127.0.0.1:<0.11078.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:44] [ns_1@127.0.0.1:<0.11112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:45] [ns_1@127.0.0.1:<0.11013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11093.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11127.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:45] [ns_1@127.0.0.1:<0.11063.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:46] [ns_1@127.0.0.1:<0.11094.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:46] [ns_1@127.0.0.1:<0.11124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:47] [ns_1@127.0.0.1:<0.11032.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:47] [ns_1@127.0.0.1:<0.11076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:48] [ns_1@127.0.0.1:<0.11109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:48] [ns_1@127.0.0.1:<0.11138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:49] [ns_1@127.0.0.1:<0.11045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:49] [ns_1@127.0.0.1:<0.11090.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:03:49] [ns_1@127.0.0.1:<0.11155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:50] [ns_1@127.0.0.1:<0.11121.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11127.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11161.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:50] [ns_1@127.0.0.1:<0.11149.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:50] [ns_1@127.0.0.1:<0.11155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:51] [ns_1@127.0.0.1:<0.11056.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:51] [ns_1@127.0.0.1:<0.11107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:51] [ns_1@127.0.0.1:<0.11155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:52] [ns_1@127.0.0.1:<0.11136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:52] [ns_1@127.0.0.1:<0.11169.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:52] [ns_1@127.0.0.1:<0.11155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:53] [ns_1@127.0.0.1:<0.11072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:53] [ns_1@127.0.0.1:<0.11119.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:03:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11155.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:03:54] [ns_1@127.0.0.1:<0.11147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:54] [ns_1@127.0.0.1:<0.11181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:55] [ns_1@127.0.0.1:<0.11083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11161.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:03:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11196.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:03:55] [ns_1@127.0.0.1:<0.11134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:56] [ns_1@127.0.0.1:<0.11162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:56] [ns_1@127.0.0.1:<0.11193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:57] [ns_1@127.0.0.1:<0.11101.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:57] [ns_1@127.0.0.1:<0.11145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:58] [ns_1@127.0.0.1:<0.11178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:58] [ns_1@127.0.0.1:<0.11208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:03:59] [ns_1@127.0.0.1:<0.11114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:03:59] [ns_1@127.0.0.1:<0.11223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:03:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752635,986246}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38243896}, {processes,10088872}, {processes_used,8463392}, {system,28155024}, {atom,1306681}, {atom_used,1284164}, {binary,672800}, {code,12859877}, {ets,2401456}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, 
{inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3657}, {memory_data,{4040077312,4015202304,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27476 kB\nBuffers: 60232 kB\nCached: 3528012 kB\nSwapCached: 0 kB\nActive: 309524 kB\nInactive: 3440712 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27476 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 162016 kB\nMapped: 24868 kB\nSlab: 134348 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582232 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612684288}, {buffered_memory,61677568}, {free_memory,28135424}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3652723,0}}, {context_switches,{1197219,0}}, {garbage_collection,{628037,850581290,0}}, {io,{{input,23139267},{output,44009565}}}, {reductions,{257960449,610417}}, {run_queue,0}, {runtime,{50220,140}}]}]}] [stats:error] [2012-03-26 2:03:59] [ns_1@127.0.0.1:<0.11158.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:00] [ns_1@127.0.0.1:<0.11191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11196.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11232.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:04:00] [ns_1@127.0.0.1:<0.11223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:00] [ns_1@127.0.0.1:<0.11218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:01] [ns_1@127.0.0.1:<0.11128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:01] [ns_1@127.0.0.1:<0.11223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:01] [ns_1@127.0.0.1:<0.11176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:02] 
[ns_1@127.0.0.1:<0.11205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:02] [ns_1@127.0.0.1:<0.11223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:02] [ns_1@127.0.0.1:<0.11238.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:03] [ns_1@127.0.0.1:<0.11140.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:04:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11223.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:04:03] [ns_1@127.0.0.1:<0.11189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:04] [ns_1@127.0.0.1:<0.11216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:04] [ns_1@127.0.0.1:<0.11251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:05] [ns_1@127.0.0.1:<0.11151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11232.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11265.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:05] [ns_1@127.0.0.1:<0.11203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:06] [ns_1@127.0.0.1:<0.11234.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:06] [ns_1@127.0.0.1:<0.11262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:07] 
[ns_1@127.0.0.1:<0.11171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:07] [ns_1@127.0.0.1:<0.11214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:08] [ns_1@127.0.0.1:<0.11249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:08] [ns_1@127.0.0.1:<0.11278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:09] [ns_1@127.0.0.1:<0.11183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:09] [ns_1@127.0.0.1:<0.11295.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:09] [ns_1@127.0.0.1:<0.11229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:10] [ns_1@127.0.0.1:<0.11260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11265.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11303.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:04:10] [ns_1@127.0.0.1:<0.11295.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:10] [ns_1@127.0.0.1:<0.11289.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:11] [ns_1@127.0.0.1:<0.11197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:11] [ns_1@127.0.0.1:<0.11295.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:11] [ns_1@127.0.0.1:<0.11245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:12] [ns_1@127.0.0.1:<0.11276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:12] [ns_1@127.0.0.1:<0.11295.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:12] [ns_1@127.0.0.1:<0.11309.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:13] [ns_1@127.0.0.1:<0.11210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] 
[2012-03-26 2:04:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11295.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:04:13] [ns_1@127.0.0.1:<0.11258.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:14] [ns_1@127.0.0.1:<0.11287.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:15] [ns_1@127.0.0.1:<0.11322.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11303.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11335.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:15] [ns_1@127.0.0.1:<0.11226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:16] [ns_1@127.0.0.1:<0.11274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:16] [ns_1@127.0.0.1:<0.11305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:17] [ns_1@127.0.0.1:<0.11336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:17] [ns_1@127.0.0.1:<0.11243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:18] [ns_1@127.0.0.1:<0.11284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:18] [ns_1@127.0.0.1:<0.11329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:18] [ns_1@127.0.0.1:<0.11344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:18] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:04:18] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: 
"/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:04:18] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:04:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:04:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.11320.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.11282.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.11390.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.11298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:04:25: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: 
"thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 2:04:23] [ns_1@127.0.0.1:<0.11315.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:24] [ns_1@127.0.0.1:<0.11390.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:24] [ns_1@127.0.0.1:<0.11332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:24] [ns_1@127.0.0.1:<0.11346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:25] [ns_1@127.0.0.1:<0.11390.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:25] [ns_1@127.0.0.1:<0.11400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11335.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11414.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:25] [ns_1@127.0.0.1:<0.11327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:26] [ns_1@127.0.0.1:<0.11390.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:26] [ns_1@127.0.0.1:<0.11408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:26] [ns_1@127.0.0.1:<0.11383.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:27] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:04:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11390.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call 
from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:04:27] [ns_1@127.0.0.1:<0.11415.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:27] [ns_1@127.0.0.1:<0.11342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:28] [ns_1@127.0.0.1:<0.11424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:28] [ns_1@127.0.0.1:<0.11384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:29] [ns_1@127.0.0.1:<0.11430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:29] [ns_1@127.0.0.1:<0.11353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:29] [ns_1@127.0.0.1:<0.11444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:30] [ns_1@127.0.0.1:<0.11436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11414.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11451.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:30] [ns_1@127.0.0.1:<0.11385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:30] [ns_1@127.0.0.1:<0.11444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:31] [ns_1@127.0.0.1:<0.11440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:31] [ns_1@127.0.0.1:<0.11355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:31] [ns_1@127.0.0.1:<0.11444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:32] [ns_1@127.0.0.1:<0.11452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:32] [ns_1@127.0.0.1:<0.11444.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:32] [ns_1@127.0.0.1:<0.11386.1>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:33] [ns_1@127.0.0.1:<0.11459.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:04:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11444.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:04:33] [ns_1@127.0.0.1:<0.11358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:34] [ns_1@127.0.0.1:<0.11466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:34] [ns_1@127.0.0.1:<0.11395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:35] [ns_1@127.0.0.1:<0.11475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11451.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:35] [ns_1@127.0.0.1:<0.11361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:36] [ns_1@127.0.0.1:<0.11479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:36] [ns_1@127.0.0.1:<0.11411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:37] [ns_1@127.0.0.1:<0.11488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:37] [ns_1@127.0.0.1:<0.11363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:38] [ns_1@127.0.0.1:<0.11495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:38] [ns_1@127.0.0.1:<0.11428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:39] [ns_1@127.0.0.1:<0.11501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:39] [ns_1@127.0.0.1:<0.11512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:39] [ns_1@127.0.0.1:<0.11365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:40] [ns_1@127.0.0.1:<0.11506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11520.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:04:40] [ns_1@127.0.0.1:<0.11512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:40] [ns_1@127.0.0.1:<0.11438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:41] [ns_1@127.0.0.1:<0.11515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:41] [ns_1@127.0.0.1:<0.11512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:41] [ns_1@127.0.0.1:<0.11406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:42] [ns_1@127.0.0.1:<0.11522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:42] [ns_1@127.0.0.1:<0.11512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:42] [ns_1@127.0.0.1:<0.11457.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:43] [ns_1@127.0.0.1:<0.11532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:04:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11512.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in 
call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:04:43] [ns_1@127.0.0.1:<0.11422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:44] [ns_1@127.0.0.1:<0.11537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:44] [ns_1@127.0.0.1:<0.11470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:45] [ns_1@127.0.0.1:<0.11544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11520.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11554.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:46] [ns_1@127.0.0.1:<0.11549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:47] [ns_1@127.0.0.1:<0.11481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:47] [ns_1@127.0.0.1:<0.11434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:48] [ns_1@127.0.0.1:<0.11448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:48] [ns_1@127.0.0.1:<0.11561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:49] [ns_1@127.0.0.1:<0.11497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:49] [ns_1@127.0.0.1:<0.11568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:49] [ns_1@127.0.0.1:<0.11578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:50] [ns_1@127.0.0.1:<0.11464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11554.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11584.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:50] [ns_1@127.0.0.1:<0.11572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:50] [ns_1@127.0.0.1:<0.11578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:51] [ns_1@127.0.0.1:<0.11508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:51] [ns_1@127.0.0.1:<0.11581.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:51] [ns_1@127.0.0.1:<0.11578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:52] [ns_1@127.0.0.1:<0.11477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:52] [ns_1@127.0.0.1:<0.11592.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:52] [ns_1@127.0.0.1:<0.11578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:53] [ns_1@127.0.0.1:<0.11526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:53] [ns_1@127.0.0.1:<0.11599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:04:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11578.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:04:54] [ns_1@127.0.0.1:<0.11493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:54] [ns_1@127.0.0.1:<0.11604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:55] [ns_1@127.0.0.1:<0.11539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:04:55] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11584.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:04:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:04:55] [ns_1@127.0.0.1:<0.11612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:56] [ns_1@127.0.0.1:<0.11503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:56] [ns_1@127.0.0.1:<0.11616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:57] [ns_1@127.0.0.1:<0.11551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:57] [ns_1@127.0.0.1:<0.11626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:58] [ns_1@127.0.0.1:<0.11517.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:58] [ns_1@127.0.0.1:<0.11631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:59] [ns_1@127.0.0.1:<0.11563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:04:59] [ns_1@127.0.0.1:<0.11637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:04:59] [ns_1@127.0.0.1:<0.11648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:04:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752696,14240}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38391280}, {processes,10171976}, {processes_used,8546496}, {system,28219304}, {atom,1306681}, {atom_used,1284164}, {binary,701552}, {code,12859877}, {ets,2430056}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3717}, {memory_data,{4040077312,4011941888,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 
3945388 kB\nMemFree: 27476 kB\nBuffers: 60332 kB\nCached: 3528176 kB\nSwapCached: 0 kB\nActive: 309744 kB\nInactive: 3440828 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27476 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 60 kB\nWriteback: 0 kB\nAnonPages: 162060 kB\nMapped: 24872 kB\nSlab: 134368 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580176 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612852224}, {buffered_memory,61779968}, {free_memory,28135424}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3712751,0}}, {context_switches,{1210408,0}}, {garbage_collection,{635268,862195910,0}}, {io,{{input,23175662},{output,44490917}}}, {reductions,{260586087,606718}}, {run_queue,0}, {runtime,{50820,150}}]}]}] [stats:error] [2012-03-26 2:05:00] [ns_1@127.0.0.1:<0.11534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11655.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:00] [ns_1@127.0.0.1:<0.11641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:00] [ns_1@127.0.0.1:<0.11648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:01] [ns_1@127.0.0.1:<0.11574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:01] [ns_1@127.0.0.1:<0.11652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:01] [ns_1@127.0.0.1:<0.11648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:02] [ns_1@127.0.0.1:<0.11546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:02] [ns_1@127.0.0.1:<0.11661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:02] [ns_1@127.0.0.1:<0.11648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:03] [ns_1@127.0.0.1:<0.11594.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:03] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:05:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11648.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:05:03] [ns_1@127.0.0.1:<0.11668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:04] [ns_1@127.0.0.1:<0.11559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:04] [ns_1@127.0.0.1:<0.11674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:05] [ns_1@127.0.0.1:<0.11606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11655.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11688.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:05] [ns_1@127.0.0.1:<0.11681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:06] [ns_1@127.0.0.1:<0.11570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:06] [ns_1@127.0.0.1:<0.11685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:07] [ns_1@127.0.0.1:<0.11620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:07] [ns_1@127.0.0.1:<0.11697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:08] [ns_1@127.0.0.1:<0.11585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:08] [ns_1@127.0.0.1:<0.11701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:09] 
[ns_1@127.0.0.1:<0.11633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:09] [ns_1@127.0.0.1:<0.11718.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:09] [ns_1@127.0.0.1:<0.11707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:10] [ns_1@127.0.0.1:<0.11601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11688.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11726.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:05:10] [ns_1@127.0.0.1:<0.11718.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:10] [ns_1@127.0.0.1:<0.11712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:11] [ns_1@127.0.0.1:<0.11643.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:11] [ns_1@127.0.0.1:<0.11718.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:11] [ns_1@127.0.0.1:<0.11723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:12] [ns_1@127.0.0.1:<0.11614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:12] [ns_1@127.0.0.1:<0.11718.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:12] [ns_1@127.0.0.1:<0.11732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:13] [ns_1@127.0.0.1:<0.11663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:05:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11718.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:05:13] [ns_1@127.0.0.1:<0.11740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:14] [ns_1@127.0.0.1:<0.11628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:14] [ns_1@127.0.0.1:<0.11745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11726.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11758.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:15] [ns_1@127.0.0.1:<0.11679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:15] [ns_1@127.0.0.1:<0.11752.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:16] [ns_1@127.0.0.1:<0.11639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:16] [ns_1@127.0.0.1:<0.11759.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:17] [ns_1@127.0.0.1:<0.11692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:17] [ns_1@127.0.0.1:<0.11767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:18] [ns_1@127.0.0.1:<0.11656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:19] [ns_1@127.0.0.1:<0.11771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:19] [ns_1@127.0.0.1:<0.11705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:19] [ns_1@127.0.0.1:<0.11786.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:20] [ns_1@127.0.0.1:<0.11778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11758.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11792.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:20] [ns_1@127.0.0.1:<0.11670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:20] [ns_1@127.0.0.1:<0.11786.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:21] [ns_1@127.0.0.1:<0.11782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:21] [ns_1@127.0.0.1:<0.11721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:21] [ns_1@127.0.0.1:<0.11786.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:22] [ns_1@127.0.0.1:<0.11793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:22] [ns_1@127.0.0.1:<0.11683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:22] [ns_1@127.0.0.1:<0.11786.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:23] [ns_1@127.0.0.1:<0.11802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:23] [ns_1@127.0.0.1:<0.11738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:05:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11786.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:05:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11792.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:25] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11821.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:05:29] [ns_1@127.0.0.1:<0.11831.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11821.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11836.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:05:30] [ns_1@127.0.0.1:<0.11831.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:31] [ns_1@127.0.0.1:<0.11831.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:32] [ns_1@127.0.0.1:<0.11831.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:33] [ns_1@127.0.0.1:<0.11750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:05:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11831.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:05:34] [ns_1@127.0.0.1:<0.11809.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:34] [ns_1@127.0.0.1:<0.11699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:35] [ns_1@127.0.0.1:<0.11814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11836.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11857.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:35] [ns_1@127.0.0.1:<0.11763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:36] [ns_1@127.0.0.1:<0.11852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:36] [ns_1@127.0.0.1:<0.11710.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:37] [ns_1@127.0.0.1:<0.11845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:37] [ns_1@127.0.0.1:<0.11776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:38] [ns_1@127.0.0.1:<0.11868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:38] [ns_1@127.0.0.1:<0.11728.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:39] [ns_1@127.0.0.1:<0.11858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:39] [ns_1@127.0.0.1:<0.11789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:39] [ns_1@127.0.0.1:<0.11887.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:40] [ns_1@127.0.0.1:<0.11878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11857.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11893.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:40] [ns_1@127.0.0.1:<0.11743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:40] [ns_1@127.0.0.1:<0.11887.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:41] [ns_1@127.0.0.1:<0.11872.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:41] [ns_1@127.0.0.1:<0.11807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:41] [ns_1@127.0.0.1:<0.11887.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:42] [ns_1@127.0.0.1:<0.11894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:42] [ns_1@127.0.0.1:<0.11755.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:42] [ns_1@127.0.0.1:<0.11887.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:43] [ns_1@127.0.0.1:<0.11883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:43] [ns_1@127.0.0.1:<0.11850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:05:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11887.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:05:44] [ns_1@127.0.0.1:<0.11909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:44] [ns_1@127.0.0.1:<0.11769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:45] [ns_1@127.0.0.1:<0.11901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11893.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11927.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:45] [ns_1@127.0.0.1:<0.11866.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:46] [ns_1@127.0.0.1:<0.11921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:46] [ns_1@127.0.0.1:<0.11780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:47] [ns_1@127.0.0.1:<0.11914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:47] [ns_1@127.0.0.1:<0.11876.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:48] [ns_1@127.0.0.1:<0.11936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:48] [ns_1@127.0.0.1:<0.11800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:49] [ns_1@127.0.0.1:<0.11928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:49] [ns_1@127.0.0.1:<0.11953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:49] [ns_1@127.0.0.1:<0.11890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:50] [ns_1@127.0.0.1:<0.11947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11927.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11961.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:05:50] [ns_1@127.0.0.1:<0.11953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:50] [ns_1@127.0.0.1:<0.11812.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:51] [ns_1@127.0.0.1:<0.11942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:51] [ns_1@127.0.0.1:<0.11953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:51] [ns_1@127.0.0.1:<0.11907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:52] [ns_1@127.0.0.1:<0.11964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:52] [ns_1@127.0.0.1:<0.11953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:52] [ns_1@127.0.0.1:<0.11854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:53] 
[ns_1@127.0.0.1:<0.11956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:05:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.11953.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:05:53] [ns_1@127.0.0.1:<0.11919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:54] [ns_1@127.0.0.1:<0.11979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:54] [ns_1@127.0.0.1:<0.11870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:55] [ns_1@127.0.0.1:<0.11974.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:05:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11961.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:05:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11996.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:05:55] [ns_1@127.0.0.1:<0.11934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:56] [ns_1@127.0.0.1:<0.11991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:56] [ns_1@127.0.0.1:<0.11881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:57] [ns_1@127.0.0.1:<0.11987.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:57] [ns_1@127.0.0.1:<0.11945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:58] [ns_1@127.0.0.1:<0.12006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:58] 
[ns_1@127.0.0.1:<0.11899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:05:59] [ns_1@127.0.0.1:<0.11999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:05:59] [ns_1@127.0.0.1:<0.12038.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:05:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752756,41319}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38574880}, {processes,10376392}, {processes_used,8750912}, {system,28198488}, {atom,1306681}, {atom_used,1284164}, {binary,702424}, {code,12859877}, {ets,2401296}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3777}, {memory_data,{4040077312,4011941888,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27352 kB\nBuffers: 60412 kB\nCached: 3528324 kB\nSwapCached: 0 kB\nActive: 309844 kB\nInactive: 3440944 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27352 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 162088 kB\nMapped: 24872 kB\nSlab: 134364 kB\nPageTables: 6464 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580176 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613003776}, {buffered_memory,61861888}, {free_memory,28008448}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3772779,0}}, {context_switches,{1222528,0}}, {garbage_collection,{642308,872049982,0}}, {io,{{input,23206224},{output,44895225}}}, {reductions,{262982737,635284}}, {run_queue,0}, {runtime,{51350,140}}]}]}] [stats:error] [2012-03-26 2:05:59] [ns_1@127.0.0.1:<0.11958.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:00] [ns_1@127.0.0.1:<0.12016.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.11996.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12047.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:06:00] [ns_1@127.0.0.1:<0.12038.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:00] [ns_1@127.0.0.1:<0.11912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:01] [ns_1@127.0.0.1:<0.12012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:01] [ns_1@127.0.0.1:<0.12038.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:01] [ns_1@127.0.0.1:<0.11976.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:02] [ns_1@127.0.0.1:<0.12049.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:02] [ns_1@127.0.0.1:<0.12038.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:02] [ns_1@127.0.0.1:<0.11924.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:03] [ns_1@127.0.0.1:<0.12042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:06:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12038.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:06:04] [ns_1@127.0.0.1:<0.11989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:04] [ns_1@127.0.0.1:<0.12064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:05] [ns_1@127.0.0.1:<0.11938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12047.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12078.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:05] [ns_1@127.0.0.1:<0.12058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:06] [ns_1@127.0.0.1:<0.12003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:06] [ns_1@127.0.0.1:<0.12075.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:07] [ns_1@127.0.0.1:<0.11949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:07] [ns_1@127.0.0.1:<0.12071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:08] [ns_1@127.0.0.1:<0.12014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:08] [ns_1@127.0.0.1:<0.12091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:09] [ns_1@127.0.0.1:<0.11969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:09] [ns_1@127.0.0.1:<0.12084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:09] [ns_1@127.0.0.1:<0.12110.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:10] [ns_1@127.0.0.1:<0.12044.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12078.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12116.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:10] [ns_1@127.0.0.1:<0.12102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:10] [ns_1@127.0.0.1:<0.12110.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:11] [ns_1@127.0.0.1:<0.11981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:11] 
[ns_1@127.0.0.1:<0.12097.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:11] [ns_1@127.0.0.1:<0.12110.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:12] [ns_1@127.0.0.1:<0.12060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:12] [ns_1@127.0.0.1:<0.12122.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:12] [ns_1@127.0.0.1:<0.12110.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:13] [ns_1@127.0.0.1:<0.11993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:13] [ns_1@127.0.0.1:<0.12113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:06:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12110.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:06:14] [ns_1@127.0.0.1:<0.12073.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:14] [ns_1@127.0.0.1:<0.12135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:15] [ns_1@127.0.0.1:<0.12008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12116.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12150.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:15] [ns_1@127.0.0.1:<0.12130.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:16] 
[ns_1@127.0.0.1:<0.12089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:16] [ns_1@127.0.0.1:<0.12147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:17] [ns_1@127.0.0.1:<0.12018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:17] [ns_1@127.0.0.1:<0.12142.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:18] [ns_1@127.0.0.1:<0.12099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:18] [ns_1@127.0.0.1:<0.12161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:19] [ns_1@127.0.0.1:<0.12053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:19] [ns_1@127.0.0.1:<0.12157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:19] [ns_1@127.0.0.1:<0.12178.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:20] [ns_1@127.0.0.1:<0.12117.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12150.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12184.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:20] [ns_1@127.0.0.1:<0.12172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:20] [ns_1@127.0.0.1:<0.12178.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:21] [ns_1@127.0.0.1:<0.12066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:21] [ns_1@127.0.0.1:<0.12168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:21] [ns_1@127.0.0.1:<0.12178.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:22] [ns_1@127.0.0.1:<0.12132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:22] [ns_1@127.0.0.1:<0.12192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:22] [ns_1@127.0.0.1:<0.12178.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:23] [ns_1@127.0.0.1:<0.12079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:23] [ns_1@127.0.0.1:<0.12181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:06:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12178.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:06:24] [ns_1@127.0.0.1:<0.12144.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:24] [ns_1@127.0.0.1:<0.12204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:25] [ns_1@127.0.0.1:<0.12093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12184.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12219.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:25] [ns_1@127.0.0.1:<0.12199.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:26] [ns_1@127.0.0.1:<0.12159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:26] [ns_1@127.0.0.1:<0.12216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:27] [ns_1@127.0.0.1:<0.12104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:27] [ns_1@127.0.0.1:<0.12212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:28] [ns_1@127.0.0.1:<0.12170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:28] [ns_1@127.0.0.1:<0.12231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:06:29] [ns_1@127.0.0.1:<0.12124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:29] [ns_1@127.0.0.1:<0.12245.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:29] [ns_1@127.0.0.1:<0.12226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:30] [ns_1@127.0.0.1:<0.12185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12219.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12254.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:06:30] [ns_1@127.0.0.1:<0.12245.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:30] [ns_1@127.0.0.1:<0.12241.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:31] [ns_1@127.0.0.1:<0.12137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:31] [ns_1@127.0.0.1:<0.12245.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:31] [ns_1@127.0.0.1:<0.12237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:32] [ns_1@127.0.0.1:<0.12201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:32] [ns_1@127.0.0.1:<0.12245.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:32] [ns_1@127.0.0.1:<0.12260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:33] [ns_1@127.0.0.1:<0.12151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:06:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12245.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:06:33] [ns_1@127.0.0.1:<0.12251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:33] [ns_1@127.0.0.1:<0.12267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:33] [ns_1@127.0.0.1:<0.12214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:34] [ns_1@127.0.0.1:<0.12229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:34] [ns_1@127.0.0.1:<0.12273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:35] [ns_1@127.0.0.1:<0.12163.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12254.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12291.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:35] [ns_1@127.0.0.1:<0.12284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:36] [ns_1@127.0.0.1:<0.12239.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:36] [ns_1@127.0.0.1:<0.12288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:37] [ns_1@127.0.0.1:<0.12174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:37] [ns_1@127.0.0.1:<0.12300.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:38] [ns_1@127.0.0.1:<0.12256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:39] [ns_1@127.0.0.1:<0.12304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:39] [ns_1@127.0.0.1:<0.12194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:39] [ns_1@127.0.0.1:<0.12319.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:40] [ns_1@127.0.0.1:<0.12310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:40] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12291.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12325.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:40] [ns_1@127.0.0.1:<0.12271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:40] [ns_1@127.0.0.1:<0.12319.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:41] [ns_1@127.0.0.1:<0.12315.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:41] [ns_1@127.0.0.1:<0.12208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:41] [ns_1@127.0.0.1:<0.12319.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:42] [ns_1@127.0.0.1:<0.12326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:42] [ns_1@127.0.0.1:<0.12286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:42] [ns_1@127.0.0.1:<0.12319.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:43] [ns_1@127.0.0.1:<0.12333.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:43] [ns_1@127.0.0.1:<0.12222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:06:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12319.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:06:44] [ns_1@127.0.0.1:<0.12341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:44] 
[ns_1@127.0.0.1:<0.12302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:45] [ns_1@127.0.0.1:<0.12346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12325.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12359.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:45] [ns_1@127.0.0.1:<0.12235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:46] [ns_1@127.0.0.1:<0.12353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:46] [ns_1@127.0.0.1:<0.12313.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:47] [ns_1@127.0.0.1:<0.12360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:47] [ns_1@127.0.0.1:<0.12249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:48] [ns_1@127.0.0.1:<0.12368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:48] [ns_1@127.0.0.1:<0.12329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:49] [ns_1@127.0.0.1:<0.12372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:49] [ns_1@127.0.0.1:<0.12265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:49] [ns_1@127.0.0.1:<0.12387.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:50] [ns_1@127.0.0.1:<0.12379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12359.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12393.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:06:50] [ns_1@127.0.0.1:<0.12344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:50] [ns_1@127.0.0.1:<0.12387.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:51] [ns_1@127.0.0.1:<0.12383.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:51] [ns_1@127.0.0.1:<0.12278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:51] [ns_1@127.0.0.1:<0.12387.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:52] [ns_1@127.0.0.1:<0.12394.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:52] [ns_1@127.0.0.1:<0.12356.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:52] [ns_1@127.0.0.1:<0.12387.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:53] [ns_1@127.0.0.1:<0.12403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:53] [ns_1@127.0.0.1:<0.12280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:06:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12387.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:06:54] [ns_1@127.0.0.1:<0.12410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:54] [ns_1@127.0.0.1:<0.12370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:55] [ns_1@127.0.0.1:<0.12415.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:06:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12393.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:06:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12428.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:06:55] [ns_1@127.0.0.1:<0.12282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:56] [ns_1@127.0.0.1:<0.12423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:56] [ns_1@127.0.0.1:<0.12381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:57] [ns_1@127.0.0.1:<0.12429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:57] [ns_1@127.0.0.1:<0.12295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:58] [ns_1@127.0.0.1:<0.12437.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:58] [ns_1@127.0.0.1:<0.12401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:06:59] [ns_1@127.0.0.1:<0.12442.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:06:59] [ns_1@127.0.0.1:<0.12455.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:06:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752816,68259}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38558488}, {processes,10327424}, {processes_used,8701944}, {system,28231064}, {atom,1306681}, {atom_used,1284164}, {binary,697768}, {code,12859877}, {ets,2432528}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3837}, {memory_data,{4040077312,4012068864,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26856 kB\nBuffers: 60520 kB\nCached: 3528468 kB\nSwapCached: 0 kB\nActive: 309920 kB\nInactive: 3441188 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26856 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 162120 kB\nMapped: 24872 kB\nSlab: 134368 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580176 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, 
{free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613151232}, {buffered_memory,61972480}, {free_memory,27500544}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3832805,0}}, {context_switches,{1235891,0}}, {garbage_collection,{649665,883320123,0}}, {io,{{input,23483077},{output,45564467}}}, {reductions,{265628136,615593}}, {run_queue,0}, {runtime,{51970,150}}]}]}] [stats:error] [2012-03-26 2:06:59] [ns_1@127.0.0.1:<0.12308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:00] [ns_1@127.0.0.1:<0.12448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12428.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12464.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:07:00] [ns_1@127.0.0.1:<0.12455.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:00] [ns_1@127.0.0.1:<0.12413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:01] [ns_1@127.0.0.1:<0.12458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:01] [ns_1@127.0.0.1:<0.12455.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:01] [ns_1@127.0.0.1:<0.12322.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:02] [ns_1@127.0.0.1:<0.12466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:02] [ns_1@127.0.0.1:<0.12455.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:02] [ns_1@127.0.0.1:<0.12425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:03] [ns_1@127.0.0.1:<0.12475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:07:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12455.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, 
list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:07:03] [ns_1@127.0.0.1:<0.12339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:04] [ns_1@127.0.0.1:<0.12481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:04] [ns_1@127.0.0.1:<0.12440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:05] [ns_1@127.0.0.1:<0.12488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12464.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12497.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:05] [ns_1@127.0.0.1:<0.12351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:06] [ns_1@127.0.0.1:<0.12492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:06] [ns_1@127.0.0.1:<0.12450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:07] [ns_1@127.0.0.1:<0.12501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:07] [ns_1@127.0.0.1:<0.12366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:08] [ns_1@127.0.0.1:<0.12508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:08] [ns_1@127.0.0.1:<0.12470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:09] [ns_1@127.0.0.1:<0.12514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:09] [ns_1@127.0.0.1:<0.12527.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:09] [ns_1@127.0.0.1:<0.12377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:10] [ns_1@127.0.0.1:<0.12519.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:10] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12497.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12535.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:07:10] [ns_1@127.0.0.1:<0.12527.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:10] [ns_1@127.0.0.1:<0.12483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:11] [ns_1@127.0.0.1:<0.12530.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:11] [ns_1@127.0.0.1:<0.12527.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:11] [ns_1@127.0.0.1:<0.12390.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:12] [ns_1@127.0.0.1:<0.12537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:12] [ns_1@127.0.0.1:<0.12527.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:12] [ns_1@127.0.0.1:<0.12494.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:13] [ns_1@127.0.0.1:<0.12547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:07:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12527.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:07:14] [ns_1@127.0.0.1:<0.12408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:14] [ns_1@127.0.0.1:<0.12552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:15] 
[ns_1@127.0.0.1:<0.12510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12535.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12567.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:15] [ns_1@127.0.0.1:<0.12559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:16] [ns_1@127.0.0.1:<0.12421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:16] [ns_1@127.0.0.1:<0.12564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:17] [ns_1@127.0.0.1:<0.12521.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:17] [ns_1@127.0.0.1:<0.12572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:18] [ns_1@127.0.0.1:<0.12435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:18] [ns_1@127.0.0.1:<0.12578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:19] [ns_1@127.0.0.1:<0.12541.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:19] [ns_1@127.0.0.1:<0.12585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:19] [ns_1@127.0.0.1:<0.12595.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:20] [ns_1@127.0.0.1:<0.12446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12567.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12601.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:20] [ns_1@127.0.0.1:<0.12589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 2:07:20] [ns_1@127.0.0.1:<0.12595.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:21] [ns_1@127.0.0.1:<0.12554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:21] [ns_1@127.0.0.1:<0.12598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:21] [ns_1@127.0.0.1:<0.12595.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:22] [ns_1@127.0.0.1:<0.12461.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:22] [ns_1@127.0.0.1:<0.12609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:22] [ns_1@127.0.0.1:<0.12595.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:23] [ns_1@127.0.0.1:<0.12568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:23] [ns_1@127.0.0.1:<0.12616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:07:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12595.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:07:24] [ns_1@127.0.0.1:<0.12477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:24] [ns_1@127.0.0.1:<0.12621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:25] [ns_1@127.0.0.1:<0.12580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12601.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12636.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:25] [ns_1@127.0.0.1:<0.12629.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:26] [ns_1@127.0.0.1:<0.12490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:26] [ns_1@127.0.0.1:<0.12633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:27] [ns_1@127.0.0.1:<0.12591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:27] [ns_1@127.0.0.1:<0.12643.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:28] [ns_1@127.0.0.1:<0.12506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:28] [ns_1@127.0.0.1:<0.12648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:29] [ns_1@127.0.0.1:<0.12611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:29] [ns_1@127.0.0.1:<0.12654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:29] [ns_1@127.0.0.1:<0.12664.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:30] [ns_1@127.0.0.1:<0.12516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12636.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12671.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:30] [ns_1@127.0.0.1:<0.12658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:30] [ns_1@127.0.0.1:<0.12664.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:31] [ns_1@127.0.0.1:<0.12623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:31] [ns_1@127.0.0.1:<0.12668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:31] [ns_1@127.0.0.1:<0.12664.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:32] [ns_1@127.0.0.1:<0.12532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:32] [ns_1@127.0.0.1:<0.12677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:32] [ns_1@127.0.0.1:<0.12664.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:33] [ns_1@127.0.0.1:<0.12637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:07:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12664.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:07:33] [ns_1@127.0.0.1:<0.12684.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:34] [ns_1@127.0.0.1:<0.12549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:34] [ns_1@127.0.0.1:<0.12561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:34] [ns_1@127.0.0.1:<0.12576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:34] [ns_1@127.0.0.1:<0.12690.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:35] [ns_1@127.0.0.1:<0.12650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12671.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12708.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:35] [ns_1@127.0.0.1:<0.12697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:36] [ns_1@127.0.0.1:<0.12587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:36] [ns_1@127.0.0.1:<0.12705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:07:37] [ns_1@127.0.0.1:<0.12660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:37] [ns_1@127.0.0.1:<0.12699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:38] [ns_1@127.0.0.1:<0.12602.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:38] [ns_1@127.0.0.1:<0.12721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:39] [ns_1@127.0.0.1:<0.12679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:39] [ns_1@127.0.0.1:<0.12736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:39] [ns_1@127.0.0.1:<0.12701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:40] [ns_1@127.0.0.1:<0.12618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12708.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12744.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:07:40] [ns_1@127.0.0.1:<0.12736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:40] [ns_1@127.0.0.1:<0.12732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:41] [ns_1@127.0.0.1:<0.12695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:41] [ns_1@127.0.0.1:<0.12736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:41] [ns_1@127.0.0.1:<0.12717.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:42] [ns_1@127.0.0.1:<0.12631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:42] [ns_1@127.0.0.1:<0.12736.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:42] [ns_1@127.0.0.1:<0.12750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:43] [ns_1@127.0.0.1:<0.12712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 
30000]}} [error_logger:error] [2012-03-26 2:07:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12736.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:07:43] [ns_1@127.0.0.1:<0.12727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:44] [ns_1@127.0.0.1:<0.12645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:44] [ns_1@127.0.0.1:<0.12763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:45] [ns_1@127.0.0.1:<0.12725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12744.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12778.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:45] [ns_1@127.0.0.1:<0.12741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:46] [ns_1@127.0.0.1:<0.12656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:46] [ns_1@127.0.0.1:<0.12775.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:47] [ns_1@127.0.0.1:<0.12739.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:48] [ns_1@127.0.0.1:<0.12758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:48] [ns_1@127.0.0.1:<0.12672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:49] [ns_1@127.0.0.1:<0.12789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:49] [ns_1@127.0.0.1:<0.12756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:49] [ns_1@127.0.0.1:<0.12804.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:50] [ns_1@127.0.0.1:<0.12770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12778.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12810.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:50] [ns_1@127.0.0.1:<0.12686.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:50] [ns_1@127.0.0.1:<0.12804.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:51] [ns_1@127.0.0.1:<0.12800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:51] [ns_1@127.0.0.1:<0.12768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:51] [ns_1@127.0.0.1:<0.12804.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:52] [ns_1@127.0.0.1:<0.12785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:52] [ns_1@127.0.0.1:<0.12703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:52] [ns_1@127.0.0.1:<0.12804.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:53] [ns_1@127.0.0.1:<0.12820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:53] [ns_1@127.0.0.1:<0.12781.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:07:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12804.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: 
[stats:error] [2012-03-26 2:07:54] [ns_1@127.0.0.1:<0.12796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:54] [ns_1@127.0.0.1:<0.12719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:55] [ns_1@127.0.0.1:<0.12832.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:07:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12810.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:07:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12845.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:07:55] [ns_1@127.0.0.1:<0.12794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:56] [ns_1@127.0.0.1:<0.12811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:56] [ns_1@127.0.0.1:<0.12730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:57] [ns_1@127.0.0.1:<0.12846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:57] [ns_1@127.0.0.1:<0.12807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:58] [ns_1@127.0.0.1:<0.12827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:58] [ns_1@127.0.0.1:<0.12746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:59] [ns_1@127.0.0.1:<0.12859.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:07:59] [ns_1@127.0.0.1:<0.12825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:07:59] [ns_1@127.0.0.1:<0.12874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:07:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752876,97326}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38457640}, {processes,10247696}, {processes_used,8622216}, {system,28209944}, {atom,1306681}, {atom_used,1284164}, {binary,697960}, {code,12859877}, {ets,2404424}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, 
{mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3897}, {memory_data,{4040077312,4012576768,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26484 kB\nBuffers: 60568 kB\nCached: 3528644 kB\nSwapCached: 0 kB\nActive: 310080 kB\nInactive: 3441256 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26484 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 16 kB\nAnonPages: 162136 kB\nMapped: 24872 kB\nSlab: 134392 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580176 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613331456}, {buffered_memory,62021632}, {free_memory,27119616}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3892835,0}}, {context_switches,{1248979,0}}, {garbage_collection,{656849,894597784,0}}, {io,{{input,23513675},{output,46007231}}}, {reductions,{268229431,615851}}, {run_queue,0}, {runtime,{52580,150}}]}]}] [stats:error] [2012-03-26 2:08:00] [ns_1@127.0.0.1:<0.12840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12845.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12881.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:00] [ns_1@127.0.0.1:<0.12761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:00] [ns_1@127.0.0.1:<0.12874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:01] [ns_1@127.0.0.1:<0.12869.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:01] [ns_1@127.0.0.1:<0.12838.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:01] [ns_1@127.0.0.1:<0.12874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:02] [ns_1@127.0.0.1:<0.12854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:02] 
[ns_1@127.0.0.1:<0.12773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:02] [ns_1@127.0.0.1:<0.12874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:03] [ns_1@127.0.0.1:<0.12889.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:03] [ns_1@127.0.0.1:<0.12852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:08:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12874.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:08:04] [ns_1@127.0.0.1:<0.12865.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:04] [ns_1@127.0.0.1:<0.12787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:05] [ns_1@127.0.0.1:<0.12902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12881.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12914.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:05] [ns_1@127.0.0.1:<0.12863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:06] [ns_1@127.0.0.1:<0.12882.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:06] [ns_1@127.0.0.1:<0.12798.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:07] [ns_1@127.0.0.1:<0.12915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:07] 
[ns_1@127.0.0.1:<0.12878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:08] [ns_1@127.0.0.1:<0.12896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:08] [ns_1@127.0.0.1:<0.12815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:09] [ns_1@127.0.0.1:<0.12931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:09] [ns_1@127.0.0.1:<0.12944.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:09] [ns_1@127.0.0.1:<0.12894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:10] [ns_1@127.0.0.1:<0.12909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12914.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12952.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:08:10] [ns_1@127.0.0.1:<0.12944.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:10] [ns_1@127.0.0.1:<0.12830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:11] [ns_1@127.0.0.1:<0.12947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:11] [ns_1@127.0.0.1:<0.12944.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:11] [ns_1@127.0.0.1:<0.12907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:12] [ns_1@127.0.0.1:<0.12925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:12] [ns_1@127.0.0.1:<0.12944.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:12] [ns_1@127.0.0.1:<0.12842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:13] [ns_1@127.0.0.1:<0.12964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:08:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.12944.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:08:13] [ns_1@127.0.0.1:<0.12923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:14] [ns_1@127.0.0.1:<0.12935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:14] [ns_1@127.0.0.1:<0.12857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:15] [ns_1@127.0.0.1:<0.12976.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12952.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.12986.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:15] [ns_1@127.0.0.1:<0.12933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:16] [ns_1@127.0.0.1:<0.12954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:16] [ns_1@127.0.0.1:<0.12867.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:17] [ns_1@127.0.0.1:<0.12989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:17] [ns_1@127.0.0.1:<0.12949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:18] [ns_1@127.0.0.1:<0.12969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:18] [ns_1@127.0.0.1:<0.12887.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:19] [ns_1@127.0.0.1:<0.13002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:19] [ns_1@127.0.0.1:<0.13012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:19] [ns_1@127.0.0.1:<0.12966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:20] [ns_1@127.0.0.1:<0.12981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.12986.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:08:20] [ns_1@127.0.0.1:<0.13012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:20] [ns_1@127.0.0.1:<0.12900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:21] [ns_1@127.0.0.1:<0.13015.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:21] [ns_1@127.0.0.1:<0.13012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:21] [ns_1@127.0.0.1:<0.12978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:22] [ns_1@127.0.0.1:<0.12995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:22] [ns_1@127.0.0.1:<0.13012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:23] [ns_1@127.0.0.1:<0.12911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:23] [ns_1@127.0.0.1:<0.13033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:08:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13012.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:08:24] [ns_1@127.0.0.1:<0.12993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:08:24] [ns_1@127.0.0.1:<0.13006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:25] [ns_1@127.0.0.1:<0.12927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13053.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:25] [ns_1@127.0.0.1:<0.13046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:26] [ns_1@127.0.0.1:<0.13004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:26] [ns_1@127.0.0.1:<0.13023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:27] [ns_1@127.0.0.1:<0.12938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:27] [ns_1@127.0.0.1:<0.13058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:28] [ns_1@127.0.0.1:<0.13017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:28] [ns_1@127.0.0.1:<0.13038.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:29] [ns_1@127.0.0.1:<0.12958.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:29] [ns_1@127.0.0.1:<0.13071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:29] [ns_1@127.0.0.1:<0.13081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:30] [ns_1@127.0.0.1:<0.13035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13053.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13088.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 2:08:30] [ns_1@127.0.0.1:<0.13050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:30] [ns_1@127.0.0.1:<0.13081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:31] [ns_1@127.0.0.1:<0.12971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:31] [ns_1@127.0.0.1:<0.13085.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:31] [ns_1@127.0.0.1:<0.13081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:32] [ns_1@127.0.0.1:<0.13048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:32] [ns_1@127.0.0.1:<0.13065.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:32] [ns_1@127.0.0.1:<0.13081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:33] [ns_1@127.0.0.1:<0.12983.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:33] [ns_1@127.0.0.1:<0.13101.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:08:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13081.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:08:34] [ns_1@127.0.0.1:<0.13062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:34] [ns_1@127.0.0.1:<0.13075.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:34] [ns_1@127.0.0.1:<0.13092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:34] [ns_1@127.0.0.1:<0.13107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:35] [ns_1@127.0.0.1:<0.12997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13088.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13125.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:35] [ns_1@127.0.0.1:<0.13114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:36] [ns_1@127.0.0.1:<0.13073.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:36] [ns_1@127.0.0.1:<0.13122.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:37] [ns_1@127.0.0.1:<0.13008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:37] [ns_1@127.0.0.1:<0.13134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:38] [ns_1@127.0.0.1:<0.13089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:38] [ns_1@127.0.0.1:<0.13138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:39] [ns_1@127.0.0.1:<0.13028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:39] [ns_1@127.0.0.1:<0.13144.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:39] [ns_1@127.0.0.1:<0.13155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:40] [ns_1@127.0.0.1:<0.13103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13125.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13161.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:40] [ns_1@127.0.0.1:<0.13149.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:40] [ns_1@127.0.0.1:<0.13155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:41] [ns_1@127.0.0.1:<0.13040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:41] 
[ns_1@127.0.0.1:<0.13158.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:41] [ns_1@127.0.0.1:<0.13155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:42] [ns_1@127.0.0.1:<0.13116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:42] [ns_1@127.0.0.1:<0.13155.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:42] [ns_1@127.0.0.1:<0.13167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:43] [ns_1@127.0.0.1:<0.13054.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:08:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13155.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:08:43] [ns_1@127.0.0.1:<0.13175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:44] [ns_1@127.0.0.1:<0.13118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:44] [ns_1@127.0.0.1:<0.13180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13161.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13193.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:45] [ns_1@127.0.0.1:<0.13067.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:46] [ns_1@127.0.0.1:<0.13187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:46] 
[ns_1@127.0.0.1:<0.13120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:47] [ns_1@127.0.0.1:<0.13194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:47] [ns_1@127.0.0.1:<0.13077.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:48] [ns_1@127.0.0.1:<0.13202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:48] [ns_1@127.0.0.1:<0.13136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:49] [ns_1@127.0.0.1:<0.13206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:49] [ns_1@127.0.0.1:<0.13096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:49] [ns_1@127.0.0.1:<0.13221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:50] [ns_1@127.0.0.1:<0.13213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13193.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:50] [ns_1@127.0.0.1:<0.13146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:50] [ns_1@127.0.0.1:<0.13221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:51] [ns_1@127.0.0.1:<0.13217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:51] [ns_1@127.0.0.1:<0.13109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:51] [ns_1@127.0.0.1:<0.13221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:52] [ns_1@127.0.0.1:<0.13228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:52] [ns_1@127.0.0.1:<0.13162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:52] [ns_1@127.0.0.1:<0.13221.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:53] [ns_1@127.0.0.1:<0.13237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:53] [ns_1@127.0.0.1:<0.13126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:08:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13221.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:08:54] [ns_1@127.0.0.1:<0.13244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:54] [ns_1@127.0.0.1:<0.13178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:55] [ns_1@127.0.0.1:<0.13249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:08:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:08:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:08:55] [ns_1@127.0.0.1:<0.13140.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:56] [ns_1@127.0.0.1:<0.13257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:56] [ns_1@127.0.0.1:<0.13189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:57] [ns_1@127.0.0.1:<0.13263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:57] [ns_1@127.0.0.1:<0.13151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:58] [ns_1@127.0.0.1:<0.13271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:58] [ns_1@127.0.0.1:<0.13204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:08:59] [ns_1@127.0.0.1:<0.13276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:08:59] [ns_1@127.0.0.1:<0.13171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:08:59] [ns_1@127.0.0.1:<0.13298.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:08:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752936,126940}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38595560}, {processes,10343656}, {processes_used,8718176}, {system,28251904}, {atom,1306681}, {atom_used,1284164}, {binary,704816}, {code,12859877}, {ets,2433128}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,3957}, {memory_data,{4040077312,4012957696,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26236 kB\nBuffers: 60640 kB\nCached: 3528800 kB\nSwapCached: 0 kB\nActive: 310220 kB\nInactive: 3441328 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26236 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 16 kB\nAnonPages: 162144 kB\nMapped: 24872 kB\nSlab: 134372 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580176 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613491200}, {buffered_memory,62095360}, {free_memory,26865664}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{3952863,0}}, {context_switches,{1261994,0}}, {garbage_collection,{664087,905793874,0}}, {io,{{input,23544282},{output,46449679}}}, {reductions,{270830386,638638}}, {run_queue,0}, {runtime,{53170,130}}]}]}] [stats:error] [2012-03-26 2:09:00] [ns_1@127.0.0.1:<0.13282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13305.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:00] [ns_1@127.0.0.1:<0.13215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:00] [ns_1@127.0.0.1:<0.13298.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:01] [ns_1@127.0.0.1:<0.13293.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:01] [ns_1@127.0.0.1:<0.13185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:01] [ns_1@127.0.0.1:<0.13298.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:02] [ns_1@127.0.0.1:<0.13306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:02] [ns_1@127.0.0.1:<0.13298.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:02] [ns_1@127.0.0.1:<0.13232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:03] [ns_1@127.0.0.1:<0.13315.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:09:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13298.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:09:03] [ns_1@127.0.0.1:<0.13198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:04] [ns_1@127.0.0.1:<0.13321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:04] [ns_1@127.0.0.1:<0.13247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:05] [ns_1@127.0.0.1:<0.13329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13305.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13344.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:05] [ns_1@127.0.0.1:<0.13211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:06] [ns_1@127.0.0.1:<0.13339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:06] [ns_1@127.0.0.1:<0.13259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:07] [ns_1@127.0.0.1:<0.13348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:07] [ns_1@127.0.0.1:<0.13224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:08] [ns_1@127.0.0.1:<0.13355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:08] [ns_1@127.0.0.1:<0.13274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:09] [ns_1@127.0.0.1:<0.13361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:09] [ns_1@127.0.0.1:<0.13374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:09] [ns_1@127.0.0.1:<0.13242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:10] [ns_1@127.0.0.1:<0.13366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13344.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13382.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:09:10] [ns_1@127.0.0.1:<0.13374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:10] [ns_1@127.0.0.1:<0.13284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:11] [ns_1@127.0.0.1:<0.13377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:11] [ns_1@127.0.0.1:<0.13374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:11] 
[ns_1@127.0.0.1:<0.13255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:12] [ns_1@127.0.0.1:<0.13384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:12] [ns_1@127.0.0.1:<0.13374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:12] [ns_1@127.0.0.1:<0.13311.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:13] [ns_1@127.0.0.1:<0.13394.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:09:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13374.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:09:13] [ns_1@127.0.0.1:<0.13269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:14] [ns_1@127.0.0.1:<0.13399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:14] [ns_1@127.0.0.1:<0.13324.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:15] [ns_1@127.0.0.1:<0.13406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13382.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13416.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:15] [ns_1@127.0.0.1:<0.13280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:16] [ns_1@127.0.0.1:<0.13411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:16] 
[ns_1@127.0.0.1:<0.13341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:17] [ns_1@127.0.0.1:<0.13419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:18] [ns_1@127.0.0.1:<0.13302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:18] [ns_1@127.0.0.1:<0.13425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:19] [ns_1@127.0.0.1:<0.13357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:19] [ns_1@127.0.0.1:<0.13432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:19] [ns_1@127.0.0.1:<0.13442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:20] [ns_1@127.0.0.1:<0.13318.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13416.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13448.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:20] [ns_1@127.0.0.1:<0.13436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:20] [ns_1@127.0.0.1:<0.13442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:21] [ns_1@127.0.0.1:<0.13368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:21] [ns_1@127.0.0.1:<0.13445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:21] [ns_1@127.0.0.1:<0.13442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:22] [ns_1@127.0.0.1:<0.13337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:22] [ns_1@127.0.0.1:<0.13453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:22] [ns_1@127.0.0.1:<0.13442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:23] [ns_1@127.0.0.1:<0.13388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:23] [ns_1@127.0.0.1:<0.13463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:09:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13442.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:09:24] [ns_1@127.0.0.1:<0.13353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:24] [ns_1@127.0.0.1:<0.13468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:25] [ns_1@127.0.0.1:<0.13401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13448.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13483.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:25] [ns_1@127.0.0.1:<0.13476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:26] [ns_1@127.0.0.1:<0.13363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:26] [ns_1@127.0.0.1:<0.13480.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:27] [ns_1@127.0.0.1:<0.13413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:27] [ns_1@127.0.0.1:<0.13488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:28] [ns_1@127.0.0.1:<0.13379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:28] [ns_1@127.0.0.1:<0.13495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:29] [ns_1@127.0.0.1:<0.13427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:29] [ns_1@127.0.0.1:<0.13501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:09:29] [ns_1@127.0.0.1:<0.13511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:30] [ns_1@127.0.0.1:<0.13396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13483.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13518.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:30] [ns_1@127.0.0.1:<0.13505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:30] [ns_1@127.0.0.1:<0.13511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:31] [ns_1@127.0.0.1:<0.13438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:31] [ns_1@127.0.0.1:<0.13515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:31] [ns_1@127.0.0.1:<0.13511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:32] [ns_1@127.0.0.1:<0.13408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:32] [ns_1@127.0.0.1:<0.13524.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:32] [ns_1@127.0.0.1:<0.13511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:33] [ns_1@127.0.0.1:<0.13458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:33] [ns_1@127.0.0.1:<0.13531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:09:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13511.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:09:34] [ns_1@127.0.0.1:<0.13423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:34] [ns_1@127.0.0.1:<0.13537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:34] [ns_1@127.0.0.1:<0.13470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:34] [ns_1@127.0.0.1:<0.13484.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:34] [ns_1@127.0.0.1:<0.13497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:34] [ns_1@127.0.0.1:<0.13507.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:35] [ns_1@127.0.0.1:<0.13526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13518.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13559.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:35] [ns_1@127.0.0.1:<0.13544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:36] [ns_1@127.0.0.1:<0.13434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:36] [ns_1@127.0.0.1:<0.13556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:37] [ns_1@127.0.0.1:<0.13539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:37] [ns_1@127.0.0.1:<0.13568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:38] [ns_1@127.0.0.1:<0.13449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:38] [ns_1@127.0.0.1:<0.13572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:39] [ns_1@127.0.0.1:<0.13562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:39] [ns_1@127.0.0.1:<0.13587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:39] [ns_1@127.0.0.1:<0.13578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:40] [ns_1@127.0.0.1:<0.13465.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13559.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13595.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:09:40] [ns_1@127.0.0.1:<0.13587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:40] [ns_1@127.0.0.1:<0.13583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:41] [ns_1@127.0.0.1:<0.13576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:41] [ns_1@127.0.0.1:<0.13587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:41] [ns_1@127.0.0.1:<0.13592.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:42] [ns_1@127.0.0.1:<0.13478.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:42] [ns_1@127.0.0.1:<0.13587.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:42] [ns_1@127.0.0.1:<0.13601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:43] [ns_1@127.0.0.1:<0.13590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:09:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13587.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:09:43] [ns_1@127.0.0.1:<0.13609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:44] [ns_1@127.0.0.1:<0.13492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:44] [ns_1@127.0.0.1:<0.13614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:45] [ns_1@127.0.0.1:<0.13607.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13595.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13629.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:45] [ns_1@127.0.0.1:<0.13621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:46] [ns_1@127.0.0.1:<0.13503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:46] [ns_1@127.0.0.1:<0.13626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:47] [ns_1@127.0.0.1:<0.13619.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:47] [ns_1@127.0.0.1:<0.13636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:48] [ns_1@127.0.0.1:<0.13519.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:48] [ns_1@127.0.0.1:<0.13640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:49] [ns_1@127.0.0.1:<0.13632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:49] [ns_1@127.0.0.1:<0.13655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:49] [ns_1@127.0.0.1:<0.13647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:50] [ns_1@127.0.0.1:<0.13533.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13629.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13663.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:09:50] [ns_1@127.0.0.1:<0.13655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:51] [ns_1@127.0.0.1:<0.13651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:51] [ns_1@127.0.0.1:<0.13645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:51] [ns_1@127.0.0.1:<0.13655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:52] [ns_1@127.0.0.1:<0.13660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:52] [ns_1@127.0.0.1:<0.13546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:52] [ns_1@127.0.0.1:<0.13655.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:53] [ns_1@127.0.0.1:<0.13671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:53] [ns_1@127.0.0.1:<0.13658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:09:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13655.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:09:54] [ns_1@127.0.0.1:<0.13678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:54] [ns_1@127.0.0.1:<0.13548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:55] [ns_1@127.0.0.1:<0.13683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:09:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13663.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:09:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.13696.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:09:55] [ns_1@127.0.0.1:<0.13676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:56] [ns_1@127.0.0.1:<0.13691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:56] [ns_1@127.0.0.1:<0.13550.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:57] [ns_1@127.0.0.1:<0.13697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:57] [ns_1@127.0.0.1:<0.13689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:58] [ns_1@127.0.0.1:<0.13705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:58] [ns_1@127.0.0.1:<0.13552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:59] [ns_1@127.0.0.1:<0.13710.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:09:59] [ns_1@127.0.0.1:<0.13701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:09:59] [ns_1@127.0.0.1:<0.13725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:09:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,752996,150489}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38398384}, {processes,10169672}, {processes_used,8544192}, {system,28228712}, {atom,1306681}, {atom_used,1284164}, {binary,702176}, {code,12859877}, {ets,2406456}]}, {system_stats, [{cpu_utilization_rate,25.43640897755611}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4017}, {memory_data,{4040077312,4013211648,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26112 kB\nBuffers: 60772 kB\nCached: 3528548 kB\nSwapCached: 0 kB\nActive: 310260 kB\nInactive: 3441312 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26112 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 162244 kB\nMapped: 24872 kB\nSlab: 134356 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580176 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, 
[{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613233152}, {buffered_memory,62230528}, {free_memory,26738688}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4012888,0}}, {context_switches,{1275780,0}}, {garbage_collection,{671897,917496052,0}}, {io,{{input,23820608},{output,47484617}}}, {reductions,{273883427,619679}}, {run_queue,0}, {runtime,{53800,140}}]}]}] [stats:error] [2012-03-26 2:10:00] [ns_1@127.0.0.1:<0.13716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13696.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13732.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:00] [ns_1@127.0.0.1:<0.13554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:00] [ns_1@127.0.0.1:<0.13725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:01] [ns_1@127.0.0.1:<0.13720.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:01] [ns_1@127.0.0.1:<0.13714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:01] [ns_1@127.0.0.1:<0.13725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:02] [ns_1@127.0.0.1:<0.13733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:02] [ns_1@127.0.0.1:<0.13570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:02] [ns_1@127.0.0.1:<0.13725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:03] [ns_1@127.0.0.1:<0.13740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:03] [ns_1@127.0.0.1:<0.13729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:10:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13725.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:10:04] [ns_1@127.0.0.1:<0.13747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:04] [ns_1@127.0.0.1:<0.13581.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:05] [ns_1@127.0.0.1:<0.13753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13732.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13765.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:05] [ns_1@127.0.0.1:<0.13745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:06] [ns_1@127.0.0.1:<0.13760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:06] [ns_1@127.0.0.1:<0.13597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:07] [ns_1@127.0.0.1:<0.13766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:07] [ns_1@127.0.0.1:<0.13758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:08] [ns_1@127.0.0.1:<0.13776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:08] [ns_1@127.0.0.1:<0.13612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:09] [ns_1@127.0.0.1:<0.13780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:09] [ns_1@127.0.0.1:<0.13774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:09] [ns_1@127.0.0.1:<0.13797.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:10] [ns_1@127.0.0.1:<0.13786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13765.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13803.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:10:10] [ns_1@127.0.0.1:<0.13797.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:10] [ns_1@127.0.0.1:<0.13624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:11] [ns_1@127.0.0.1:<0.13793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:11] [ns_1@127.0.0.1:<0.13797.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:11] [ns_1@127.0.0.1:<0.13784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:12] [ns_1@127.0.0.1:<0.13805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:12] [ns_1@127.0.0.1:<0.13797.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:12] [ns_1@127.0.0.1:<0.13638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:13] [ns_1@127.0.0.1:<0.13814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:10:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13797.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:10:13] [ns_1@127.0.0.1:<0.13800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:14] [ns_1@127.0.0.1:<0.13820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:14] [ns_1@127.0.0.1:<0.13649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:15] [ns_1@127.0.0.1:<0.13827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13803.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13837.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:15] [ns_1@127.0.0.1:<0.13817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:16] [ns_1@127.0.0.1:<0.13832.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:16] [ns_1@127.0.0.1:<0.13666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:17] [ns_1@127.0.0.1:<0.13840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:17] [ns_1@127.0.0.1:<0.13829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:18] [ns_1@127.0.0.1:<0.13846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:18] [ns_1@127.0.0.1:<0.13681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:19] [ns_1@127.0.0.1:<0.13853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:19] [ns_1@127.0.0.1:<0.13863.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:19] [ns_1@127.0.0.1:<0.13844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:20] [ns_1@127.0.0.1:<0.13857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13837.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13871.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:10:20] [ns_1@127.0.0.1:<0.13863.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:20] [ns_1@127.0.0.1:<0.13693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:21] [ns_1@127.0.0.1:<0.13866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:21] [ns_1@127.0.0.1:<0.13863.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:21] [ns_1@127.0.0.1:<0.13855.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:22] [ns_1@127.0.0.1:<0.13874.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:22] [ns_1@127.0.0.1:<0.13863.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:22] [ns_1@127.0.0.1:<0.13708.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:23] [ns_1@127.0.0.1:<0.13884.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:10:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13863.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:10:23] [ns_1@127.0.0.1:<0.13868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:24] [ns_1@127.0.0.1:<0.13889.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:24] [ns_1@127.0.0.1:<0.13718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:25] [ns_1@127.0.0.1:<0.13897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13871.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.13906.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:26] [ns_1@127.0.0.1:<0.13886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:26] [ns_1@127.0.0.1:<0.13901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:27] [ns_1@127.0.0.1:<0.13736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:27] [ns_1@127.0.0.1:<0.13909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:28] [ns_1@127.0.0.1:<0.13899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:28] [ns_1@127.0.0.1:<0.13916.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:29] [ns_1@127.0.0.1:<0.13751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:29] [ns_1@127.0.0.1:<0.13922.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:29] [ns_1@127.0.0.1:<0.13932.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:30] [ns_1@127.0.0.1:<0.13913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13906.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.13939.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:30] [ns_1@127.0.0.1:<0.13926.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:30] [ns_1@127.0.0.1:<0.13932.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:31] [ns_1@127.0.0.1:<0.13762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:31] [ns_1@127.0.0.1:<0.13936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:31] [ns_1@127.0.0.1:<0.13932.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:32] [ns_1@127.0.0.1:<0.13924.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:32] [ns_1@127.0.0.1:<0.13943.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:32] 
[ns_1@127.0.0.1:<0.13932.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:33] [ns_1@127.0.0.1:<0.13778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:33] [ns_1@127.0.0.1:<0.13952.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:10:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.13932.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:10:34] [ns_1@127.0.0.1:<0.13940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:34] [ns_1@127.0.0.1:<0.13958.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:35] [ns_1@127.0.0.1:<0.13789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:35] [ns_1@127.0.0.1:<0.13834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:35] [ns_1@127.0.0.1:<0.13848.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:35] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:10:35] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:10:35] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:10:35] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:10:35] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
[ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.13859.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.13971.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.14009.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.13974.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:10:42: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [error_logger:error] 
[2012-03-26 2:10:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.13939.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:40] [ns_1@127.0.0.1:<0.13977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:41] [ns_1@127.0.0.1:<0.14009.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:41] [ns_1@127.0.0.1:<0.13879.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:41] [ns_1@127.0.0.1:<0.13965.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:42] [ns_1@127.0.0.1:<0.14009.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:42] [ns_1@127.0.0.1:<0.14021.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:42] [ns_1@127.0.0.1:<0.13979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:43] [ns_1@127.0.0.1:<0.14009.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:43] [ns_1@127.0.0.1:<0.13891.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:43] [ns_1@127.0.0.1:<0.14002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:44] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:10:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14009.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:10:44] [ns_1@127.0.0.1:<0.14036.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:44] 
[ns_1@127.0.0.1:<0.13981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:45] [ns_1@127.0.0.1:<0.13903.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14054.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:45] [ns_1@127.0.0.1:<0.14003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:46] [ns_1@127.0.0.1:<0.14048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:46] [ns_1@127.0.0.1:<0.14026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:47] [ns_1@127.0.0.1:<0.13918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:47] [ns_1@127.0.0.1:<0.14004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:48] [ns_1@127.0.0.1:<0.14063.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:48] [ns_1@127.0.0.1:<0.14039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:49] [ns_1@127.0.0.1:<0.13928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:49] [ns_1@127.0.0.1:<0.14080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:49] [ns_1@127.0.0.1:<0.14005.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:50] [ns_1@127.0.0.1:<0.14074.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14054.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14088.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] 
[2012-03-26 2:10:50] [ns_1@127.0.0.1:<0.14080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:50] [ns_1@127.0.0.1:<0.14051.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:51] [ns_1@127.0.0.1:<0.13947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:51] [ns_1@127.0.0.1:<0.14080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:51] [ns_1@127.0.0.1:<0.14013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:52] [ns_1@127.0.0.1:<0.14091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:52] [ns_1@127.0.0.1:<0.14080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:52] [ns_1@127.0.0.1:<0.14065.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:53] [ns_1@127.0.0.1:<0.13960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:10:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14080.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:10:53] [ns_1@127.0.0.1:<0.14034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:54] [ns_1@127.0.0.1:<0.14106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:54] [ns_1@127.0.0.1:<0.14076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:55] [ns_1@127.0.0.1:<0.14028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:10:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14088.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:10:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14123.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:10:55] [ns_1@127.0.0.1:<0.14046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:56] [ns_1@127.0.0.1:<0.14118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:56] [ns_1@127.0.0.1:<0.14096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:57] [ns_1@127.0.0.1:<0.14043.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:57] [ns_1@127.0.0.1:<0.14061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:58] [ns_1@127.0.0.1:<0.14133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:58] [ns_1@127.0.0.1:<0.14108.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:10:59] [ns_1@127.0.0.1:<0.14057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:10:59] [ns_1@127.0.0.1:<0.14150.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:10:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753056,176792}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37920696}, {processes,10020760}, {processes_used,8395280}, {system,27899936}, {atom,1306681}, {atom_used,1284164}, {binary,338344}, {code,12859877}, {ets,2435104}]}, {system_stats, [{cpu_utilization_rate,25.12562814070352}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4077}, {memory_data,{4040077312,4013338624,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25988 kB\nBuffers: 60864 kB\nCached: 3528704 kB\nSwapCached: 0 kB\nActive: 310136 kB\nInactive: 3441448 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25988 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 88 kB\nWriteback: 0 kB\nAnonPages: 162020 kB\nMapped: 24872 kB\nSlab: 134376 kB\nPageTables: 6472 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582684 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, 
{free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613392896}, {buffered_memory,62324736}, {free_memory,26611712}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4072914,0}}, {context_switches,{1288997,0}}, {garbage_collection,{679181,929134400,0}}, {io,{{input,23857051},{output,47970370}}}, {reductions,{276515199,637927}}, {run_queue,0}, {runtime,{54400,140}}]}]}] [stats:error] [2012-03-26 2:11:00] [ns_1@127.0.0.1:<0.14072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:00] [ns_1@127.0.0.1:<0.14143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14123.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14159.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:11:00] [ns_1@127.0.0.1:<0.14150.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:01] [ns_1@127.0.0.1:<0.14120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:01] [ns_1@127.0.0.1:<0.14070.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:01] [ns_1@127.0.0.1:<0.14150.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:02] [ns_1@127.0.0.1:<0.14085.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:02] [ns_1@127.0.0.1:<0.14161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:02] [ns_1@127.0.0.1:<0.14150.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:03] [ns_1@127.0.0.1:<0.14135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:03] [ns_1@127.0.0.1:<0.14083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:11:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14150.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, 
list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:11:04] [ns_1@127.0.0.1:<0.14103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:04] [ns_1@127.0.0.1:<0.14176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:05] [ns_1@127.0.0.1:<0.14145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14159.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14190.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:05] [ns_1@127.0.0.1:<0.14101.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:06] [ns_1@127.0.0.1:<0.14116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:06] [ns_1@127.0.0.1:<0.14187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:07] [ns_1@127.0.0.1:<0.14165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:07] [ns_1@127.0.0.1:<0.14114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:08] [ns_1@127.0.0.1:<0.14130.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:08] [ns_1@127.0.0.1:<0.14203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:09] [ns_1@127.0.0.1:<0.14178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:09] [ns_1@127.0.0.1:<0.14126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:09] [ns_1@127.0.0.1:<0.14222.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:10] [ns_1@127.0.0.1:<0.14141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= 
Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14190.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14228.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:10] [ns_1@127.0.0.1:<0.14214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:10] [ns_1@127.0.0.1:<0.14222.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:11] [ns_1@127.0.0.1:<0.14191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:11] [ns_1@127.0.0.1:<0.14139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:11] [ns_1@127.0.0.1:<0.14222.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:12] [ns_1@127.0.0.1:<0.14156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:12] [ns_1@127.0.0.1:<0.14232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:12] [ns_1@127.0.0.1:<0.14222.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:13] [ns_1@127.0.0.1:<0.14205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:13] [ns_1@127.0.0.1:<0.14154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:11:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14222.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:11:14] [ns_1@127.0.0.1:<0.14172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:14] [ns_1@127.0.0.1:<0.14247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:15] 
[ns_1@127.0.0.1:<0.14216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14228.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:15] [ns_1@127.0.0.1:<0.14170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:16] [ns_1@127.0.0.1:<0.14185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:16] [ns_1@127.0.0.1:<0.14259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:17] [ns_1@127.0.0.1:<0.14236.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:17] [ns_1@127.0.0.1:<0.14183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:18] [ns_1@127.0.0.1:<0.14201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:18] [ns_1@127.0.0.1:<0.14273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:19] [ns_1@127.0.0.1:<0.14249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:19] [ns_1@127.0.0.1:<0.14288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:19] [ns_1@127.0.0.1:<0.14196.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:20] [ns_1@127.0.0.1:<0.14211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14296.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:11:20] [ns_1@127.0.0.1:<0.14288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:11:20] [ns_1@127.0.0.1:<0.14284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:21] [ns_1@127.0.0.1:<0.14263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:21] [ns_1@127.0.0.1:<0.14288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:21] [ns_1@127.0.0.1:<0.14209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:22] [ns_1@127.0.0.1:<0.14229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:22] [ns_1@127.0.0.1:<0.14288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:22] [ns_1@127.0.0.1:<0.14304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:23] [ns_1@127.0.0.1:<0.14275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14288.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [ns_server:info] [2012-03-26 2:11:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 2:11:23] [ns_1@127.0.0.1:<0.14225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:24] [ns_1@127.0.0.1:<0.14244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:24] [ns_1@127.0.0.1:<0.14316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:25] [ns_1@127.0.0.1:<0.14291.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14296.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14331.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:25] [ns_1@127.0.0.1:<0.14242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:26] [ns_1@127.0.0.1:<0.14256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:26] [ns_1@127.0.0.1:<0.14328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:27] [ns_1@127.0.0.1:<0.14309.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:27] [ns_1@127.0.0.1:<0.14254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:28] [ns_1@127.0.0.1:<0.14271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:28] [ns_1@127.0.0.1:<0.14343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:29] [ns_1@127.0.0.1:<0.14322.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:29] [ns_1@127.0.0.1:<0.14357.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:29] [ns_1@127.0.0.1:<0.14269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:30] [ns_1@127.0.0.1:<0.14282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14331.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14366.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:11:30] [ns_1@127.0.0.1:<0.14357.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:30] [ns_1@127.0.0.1:<0.14353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:31] [ns_1@127.0.0.1:<0.14334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:31] [ns_1@127.0.0.1:<0.14357.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:31] [ns_1@127.0.0.1:<0.14280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:32] [ns_1@127.0.0.1:<0.14298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:32] [ns_1@127.0.0.1:<0.14357.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 2:11:32] [ns_1@127.0.0.1:<0.14372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14357.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:11:35] [ns_1@127.0.0.1:<0.14347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14366.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14391.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:35] [ns_1@127.0.0.1:<0.14361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:36] [ns_1@127.0.0.1:<0.14293.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:36] [ns_1@127.0.0.1:<0.14314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:37] [ns_1@127.0.0.1:<0.14392.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:37] [ns_1@127.0.0.1:<0.14377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:38] [ns_1@127.0.0.1:<0.14311.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:38] [ns_1@127.0.0.1:<0.14326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:39] [ns_1@127.0.0.1:<0.14406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:39] [ns_1@127.0.0.1:<0.14397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:39] [ns_1@127.0.0.1:<0.14421.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:40] [ns_1@127.0.0.1:<0.14324.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14391.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14427.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:40] [ns_1@127.0.0.1:<0.14341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:40] [ns_1@127.0.0.1:<0.14421.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:41] [ns_1@127.0.0.1:<0.14421.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:42] [ns_1@127.0.0.1:<0.14421.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:11:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14421.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:11:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14427.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14443.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:11:49] [ns_1@127.0.0.1:<0.14453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14443.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14457.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:50] [ns_1@127.0.0.1:<0.14351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:50] [ns_1@127.0.0.1:<0.14453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:51] [ns_1@127.0.0.1:<0.14417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:51] [ns_1@127.0.0.1:<0.14410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:51] [ns_1@127.0.0.1:<0.14453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:52] [ns_1@127.0.0.1:<0.14338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:52] [ns_1@127.0.0.1:<0.14368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:52] [ns_1@127.0.0.1:<0.14453.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:53] [ns_1@127.0.0.1:<0.14467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:53] [ns_1@127.0.0.1:<0.14424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:11:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14453.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:11:54] [ns_1@127.0.0.1:<0.14349.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:54] [ns_1@127.0.0.1:<0.14383.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:55] [ns_1@127.0.0.1:<0.14479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:11:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14457.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:11:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14492.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:11:55] [ns_1@127.0.0.1:<0.14472.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:56] [ns_1@127.0.0.1:<0.14363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:56] [ns_1@127.0.0.1:<0.14388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:57] [ns_1@127.0.0.1:<0.14493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:57] [ns_1@127.0.0.1:<0.14485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:58] [ns_1@127.0.0.1:<0.14379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:58] [ns_1@127.0.0.1:<0.14404.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:59] [ns_1@127.0.0.1:<0.14506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:11:59] [ns_1@127.0.0.1:<0.14499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:11:59] [ns_1@127.0.0.1:<0.14536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:11:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753116,203481}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37949744}, {processes,10067264}, {processes_used,8441784}, {system,27882480}, {atom,1306681}, {atom_used,1284164}, {binary,341512}, {code,12859877}, {ets,2407584}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, 
{cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4137}, {memory_data,{4040077312,4013465600,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26112 kB\nBuffers: 60944 kB\nCached: 3528860 kB\nSwapCached: 0 kB\nActive: 310236 kB\nInactive: 3441560 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26112 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 162028 kB\nMapped: 24872 kB\nSlab: 134340 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582684 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613552640}, {buffered_memory,62406656}, {free_memory,26738688}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4132941,0}}, {context_switches,{1300965,0}}, {garbage_collection,{685790,938705356,0}}, {io,{{input,23887595},{output,48366903}}}, {reductions,{278833565,405671}}, {run_queue,0}, {runtime,{54960,90}}]}]}] [stats:error] [2012-03-26 2:12:00] [ns_1@127.0.0.1:<0.14402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14492.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14543.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:00] [ns_1@127.0.0.1:<0.14415.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:00] [ns_1@127.0.0.1:<0.14536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:01] [ns_1@127.0.0.1:<0.14531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:01] [ns_1@127.0.0.1:<0.14536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:01] [ns_1@127.0.0.1:<0.14510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:02] 
[ns_1@127.0.0.1:<0.14412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:02] [ns_1@127.0.0.1:<0.14536.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:02] [ns_1@127.0.0.1:<0.14462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:03] [ns_1@127.0.0.1:<0.14553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:12:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14536.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:12:03] [ns_1@127.0.0.1:<0.14540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:04] [ns_1@127.0.0.1:<0.14428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:04] [ns_1@127.0.0.1:<0.14477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:05] [ns_1@127.0.0.1:<0.14567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14543.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14576.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:05] [ns_1@127.0.0.1:<0.14556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:06] [ns_1@127.0.0.1:<0.14458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:06] [ns_1@127.0.0.1:<0.14489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:07] 
[ns_1@127.0.0.1:<0.14580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:07] [ns_1@127.0.0.1:<0.14569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:08] [ns_1@127.0.0.1:<0.14474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:08] [ns_1@127.0.0.1:<0.14504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:09] [ns_1@127.0.0.1:<0.14593.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:09] [ns_1@127.0.0.1:<0.14606.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:09] [ns_1@127.0.0.1:<0.14585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:10] [ns_1@127.0.0.1:<0.14487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14576.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14614.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:12:10] [ns_1@127.0.0.1:<0.14606.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:10] [ns_1@127.0.0.1:<0.14514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:11] [ns_1@127.0.0.1:<0.14609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:11] [ns_1@127.0.0.1:<0.14606.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:11] [ns_1@127.0.0.1:<0.14595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:12] [ns_1@127.0.0.1:<0.14501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:12] [ns_1@127.0.0.1:<0.14606.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:12] [ns_1@127.0.0.1:<0.14549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:13] [ns_1@127.0.0.1:<0.14626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] 
[2012-03-26 2:12:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14606.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:12:13] [ns_1@127.0.0.1:<0.14611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:14] [ns_1@127.0.0.1:<0.14512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:14] [ns_1@127.0.0.1:<0.14562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:15] [ns_1@127.0.0.1:<0.14638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14614.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14648.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:15] [ns_1@127.0.0.1:<0.14628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:16] [ns_1@127.0.0.1:<0.14544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:16] [ns_1@127.0.0.1:<0.14573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:17] [ns_1@127.0.0.1:<0.14651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:18] [ns_1@127.0.0.1:<0.14640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:18] [ns_1@127.0.0.1:<0.14559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:19] [ns_1@127.0.0.1:<0.14589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:19] [ns_1@127.0.0.1:<0.14664.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:19] [ns_1@127.0.0.1:<0.14674.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:12:20] [ns_1@127.0.0.1:<0.14655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14648.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14680.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:20] [ns_1@127.0.0.1:<0.14571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:20] [ns_1@127.0.0.1:<0.14674.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:21] [ns_1@127.0.0.1:<0.14600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:21] [ns_1@127.0.0.1:<0.14677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:21] [ns_1@127.0.0.1:<0.14674.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:22] [ns_1@127.0.0.1:<0.14666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:22] [ns_1@127.0.0.1:<0.14587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:22] [ns_1@127.0.0.1:<0.14674.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:23] [ns_1@127.0.0.1:<0.14620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:23] [ns_1@127.0.0.1:<0.14695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:12:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14674.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] 
[2012-03-26 2:12:24] [ns_1@127.0.0.1:<0.14681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:24] [ns_1@127.0.0.1:<0.14598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:25] [ns_1@127.0.0.1:<0.14633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14680.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14715.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:25] [ns_1@127.0.0.1:<0.14708.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:26] [ns_1@127.0.0.1:<0.14697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:26] [ns_1@127.0.0.1:<0.14616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:27] [ns_1@127.0.0.1:<0.14645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:27] [ns_1@127.0.0.1:<0.14720.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:28] [ns_1@127.0.0.1:<0.14710.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:28] [ns_1@127.0.0.1:<0.14631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:29] [ns_1@127.0.0.1:<0.14659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:29] [ns_1@127.0.0.1:<0.14733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:29] [ns_1@127.0.0.1:<0.14743.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:30] [ns_1@127.0.0.1:<0.14724.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14715.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.14750.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:30] [ns_1@127.0.0.1:<0.14643.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:30] [ns_1@127.0.0.1:<0.14743.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:31] [ns_1@127.0.0.1:<0.14670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:31] [ns_1@127.0.0.1:<0.14747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:31] [ns_1@127.0.0.1:<0.14743.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:32] [ns_1@127.0.0.1:<0.14735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:32] [ns_1@127.0.0.1:<0.14657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:32] [ns_1@127.0.0.1:<0.14743.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:33] [ns_1@127.0.0.1:<0.14690.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:33] [ns_1@127.0.0.1:<0.14763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:12:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14743.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:12:34] [ns_1@127.0.0.1:<0.14751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:34] [ns_1@127.0.0.1:<0.14668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:35] [ns_1@127.0.0.1:<0.14702.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14750.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14783.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:35] [ns_1@127.0.0.1:<0.14776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:36] [ns_1@127.0.0.1:<0.14765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:36] [ns_1@127.0.0.1:<0.14685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:37] [ns_1@127.0.0.1:<0.14716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:37] [ns_1@127.0.0.1:<0.14792.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:38] [ns_1@127.0.0.1:<0.14778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:38] [ns_1@127.0.0.1:<0.14700.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:39] [ns_1@127.0.0.1:<0.14729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:39] [ns_1@127.0.0.1:<0.14811.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:39] [ns_1@127.0.0.1:<0.14802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:40] [ns_1@127.0.0.1:<0.14794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14783.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14819.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:12:40] [ns_1@127.0.0.1:<0.14811.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:40] [ns_1@127.0.0.1:<0.14712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:41] [ns_1@127.0.0.1:<0.14739.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:41] [ns_1@127.0.0.1:<0.14811.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:41] [ns_1@127.0.0.1:<0.14816.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:42] [ns_1@127.0.0.1:<0.14804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:42] [ns_1@127.0.0.1:<0.14811.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:42] [ns_1@127.0.0.1:<0.14727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:43] [ns_1@127.0.0.1:<0.14758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:12:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14811.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:12:43] [ns_1@127.0.0.1:<0.14833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:44] [ns_1@127.0.0.1:<0.14821.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:44] [ns_1@127.0.0.1:<0.14737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:45] [ns_1@127.0.0.1:<0.14771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14819.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14853.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:45] [ns_1@127.0.0.1:<0.14845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:46] [ns_1@127.0.0.1:<0.14836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:46] [ns_1@127.0.0.1:<0.14754.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:47] [ns_1@127.0.0.1:<0.14784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:47] [ns_1@127.0.0.1:<0.14860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:48] [ns_1@127.0.0.1:<0.14848.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:48] [ns_1@127.0.0.1:<0.14769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:49] [ns_1@127.0.0.1:<0.14800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:49] [ns_1@127.0.0.1:<0.14879.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:49] [ns_1@127.0.0.1:<0.14871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:50] [ns_1@127.0.0.1:<0.14862.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14853.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14887.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:12:50] [ns_1@127.0.0.1:<0.14879.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:50] [ns_1@127.0.0.1:<0.14780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:50] [ns_1@127.0.0.1:<0.14796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:50] [ns_1@127.0.0.1:<0.14807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:51] [ns_1@127.0.0.1:<0.14814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:51] [ns_1@127.0.0.1:<0.14879.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:52] [ns_1@127.0.0.1:<0.14884.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:52] [ns_1@127.0.0.1:<0.14873.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:52] [ns_1@127.0.0.1:<0.14879.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:53] [ns_1@127.0.0.1:<0.14825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:53] 
[ns_1@127.0.0.1:<0.14831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:12:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14879.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:12:54] [ns_1@127.0.0.1:<0.14906.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:54] [ns_1@127.0.0.1:<0.14890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:55] [ns_1@127.0.0.1:<0.14838.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:12:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14887.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:12:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14924.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:12:55] [ns_1@127.0.0.1:<0.14843.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:56] [ns_1@127.0.0.1:<0.14919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:56] [ns_1@127.0.0.1:<0.14892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:57] [ns_1@127.0.0.1:<0.14850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:57] [ns_1@127.0.0.1:<0.14856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:58] [ns_1@127.0.0.1:<0.14933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:58] [ns_1@127.0.0.1:<0.14894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:59] 
[ns_1@127.0.0.1:<0.14864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:12:59] [ns_1@127.0.0.1:<0.14869.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:12:59] [ns_1@127.0.0.1:<0.14953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:12:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753176,231339}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37903856}, {processes,9987808}, {processes_used,8362328}, {system,27916048}, {atom,1306681}, {atom_used,1284164}, {binary,337312}, {code,12859877}, {ets,2439104}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4197}, {memory_data,{4040077312,4013338624,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25740 kB\nBuffers: 61072 kB\nCached: 3529020 kB\nSwapCached: 0 kB\nActive: 310404 kB\nInactive: 3441716 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25740 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 16 kB\nAnonPages: 162036 kB\nMapped: 24872 kB\nSlab: 134356 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582684 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613716480}, {buffered_memory,62537728}, {free_memory,26357760}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4192969,1}}, {context_switches,{1314342,0}}, {garbage_collection,{693051,950070554,0}}, {io,{{input,24172721},{output,49043111}}}, {reductions,{281462135,658848}}, {run_queue,0}, {runtime,{55550,130}}]}]}] [stats:error] [2012-03-26 2:13:00] [ns_1@127.0.0.1:<0.14944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14924.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:00] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14960.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:00] [ns_1@127.0.0.1:<0.14909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:00] [ns_1@127.0.0.1:<0.14953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:01] [ns_1@127.0.0.1:<0.14875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:01] [ns_1@127.0.0.1:<0.14882.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:01] [ns_1@127.0.0.1:<0.14953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:02] [ns_1@127.0.0.1:<0.14961.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:02] [ns_1@127.0.0.1:<0.14921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:02] [ns_1@127.0.0.1:<0.14953.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:03] [ns_1@127.0.0.1:<0.14899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:03] [ns_1@127.0.0.1:<0.14904.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:13:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.14953.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:13:04] [ns_1@127.0.0.1:<0.14975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:04] [ns_1@127.0.0.1:<0.14936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:05] [ns_1@127.0.0.1:<0.14911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14960.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.14993.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:05] [ns_1@127.0.0.1:<0.14917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:06] [ns_1@127.0.0.1:<0.14988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:06] [ns_1@127.0.0.1:<0.14946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:07] [ns_1@127.0.0.1:<0.14925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:07] [ns_1@127.0.0.1:<0.14929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:08] [ns_1@127.0.0.1:<0.15004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:08] [ns_1@127.0.0.1:<0.14964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:09] [ns_1@127.0.0.1:<0.14938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:09] [ns_1@127.0.0.1:<0.14942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:09] [ns_1@127.0.0.1:<0.15025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:10] [ns_1@127.0.0.1:<0.15014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.14993.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15031.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:10] [ns_1@127.0.0.1:<0.14979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:10] [ns_1@127.0.0.1:<0.15025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:11] [ns_1@127.0.0.1:<0.14948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:11] 
[ns_1@127.0.0.1:<0.15025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:11] [ns_1@127.0.0.1:<0.14957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:12] [ns_1@127.0.0.1:<0.15032.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:12] [ns_1@127.0.0.1:<0.15025.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:12] [ns_1@127.0.0.1:<0.14990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:13] [ns_1@127.0.0.1:<0.14968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:13:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15025.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:13:13] [ns_1@127.0.0.1:<0.14973.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:14] [ns_1@127.0.0.1:<0.15048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:14] [ns_1@127.0.0.1:<0.15006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:15] [ns_1@127.0.0.1:<0.14981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15031.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15065.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:15] [ns_1@127.0.0.1:<0.14986.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:16] 
[ns_1@127.0.0.1:<0.15060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:16] [ns_1@127.0.0.1:<0.15017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:17] [ns_1@127.0.0.1:<0.14994.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:17] [ns_1@127.0.0.1:<0.14999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:18] [ns_1@127.0.0.1:<0.15074.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:18] [ns_1@127.0.0.1:<0.15037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:19] [ns_1@127.0.0.1:<0.15008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:19] [ns_1@127.0.0.1:<0.15091.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:19] [ns_1@127.0.0.1:<0.15012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:20] [ns_1@127.0.0.1:<0.15085.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15065.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15099.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:13:20] [ns_1@127.0.0.1:<0.15091.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:20] [ns_1@127.0.0.1:<0.15050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:21] [ns_1@127.0.0.1:<0.15021.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:21] [ns_1@127.0.0.1:<0.15091.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:21] [ns_1@127.0.0.1:<0.15028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:22] [ns_1@127.0.0.1:<0.15102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:22] [ns_1@127.0.0.1:<0.15091.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:22] [ns_1@127.0.0.1:<0.15062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:23] [ns_1@127.0.0.1:<0.15042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:13:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15091.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:13:23] [ns_1@127.0.0.1:<0.15045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:24] [ns_1@127.0.0.1:<0.15117.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:24] [ns_1@127.0.0.1:<0.15076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:25] [ns_1@127.0.0.1:<0.15055.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15099.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15134.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:25] [ns_1@127.0.0.1:<0.15057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:26] [ns_1@127.0.0.1:<0.15129.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:27] [ns_1@127.0.0.1:<0.15087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:27] [ns_1@127.0.0.1:<0.15068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:28] [ns_1@127.0.0.1:<0.15072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:28] [ns_1@127.0.0.1:<0.15144.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:29] [ns_1@127.0.0.1:<0.15107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:13:29] [ns_1@127.0.0.1:<0.15081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:29] [ns_1@127.0.0.1:<0.15160.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:30] [ns_1@127.0.0.1:<0.15083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15134.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15167.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:30] [ns_1@127.0.0.1:<0.15154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:30] [ns_1@127.0.0.1:<0.15160.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:31] [ns_1@127.0.0.1:<0.15119.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:31] [ns_1@127.0.0.1:<0.15094.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:31] [ns_1@127.0.0.1:<0.15160.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:32] [ns_1@127.0.0.1:<0.15096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:32] [ns_1@127.0.0.1:<0.15171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:32] [ns_1@127.0.0.1:<0.15160.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:33] [ns_1@127.0.0.1:<0.15131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:33] [ns_1@127.0.0.1:<0.15112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:13:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15160.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:13:34] [ns_1@127.0.0.1:<0.15114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:34] [ns_1@127.0.0.1:<0.15186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:35] [ns_1@127.0.0.1:<0.15146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15167.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15200.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:35] [ns_1@127.0.0.1:<0.15125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:36] [ns_1@127.0.0.1:<0.15127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:36] [ns_1@127.0.0.1:<0.15197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:37] [ns_1@127.0.0.1:<0.15156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:37] [ns_1@127.0.0.1:<0.15137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:38] [ns_1@127.0.0.1:<0.15141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:38] [ns_1@127.0.0.1:<0.15213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:39] [ns_1@127.0.0.1:<0.15175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:39] [ns_1@127.0.0.1:<0.15150.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:39] [ns_1@127.0.0.1:<0.15230.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:40] [ns_1@127.0.0.1:<0.15152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15200.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15236.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:40] [ns_1@127.0.0.1:<0.15224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:40] [ns_1@127.0.0.1:<0.15230.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:41] [ns_1@127.0.0.1:<0.15188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:41] [ns_1@127.0.0.1:<0.15164.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:41] [ns_1@127.0.0.1:<0.15230.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:42] [ns_1@127.0.0.1:<0.15168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:42] [ns_1@127.0.0.1:<0.15240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:42] [ns_1@127.0.0.1:<0.15230.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:43] [ns_1@127.0.0.1:<0.15201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:43] [ns_1@127.0.0.1:<0.15180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:13:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15230.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:13:44] [ns_1@127.0.0.1:<0.15182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:44] [ns_1@127.0.0.1:<0.15255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:45] [ns_1@127.0.0.1:<0.15215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:45] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15236.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15270.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:45] [ns_1@127.0.0.1:<0.15193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:46] [ns_1@127.0.0.1:<0.15195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:46] [ns_1@127.0.0.1:<0.15267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:47] [ns_1@127.0.0.1:<0.15226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:47] [ns_1@127.0.0.1:<0.15206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:48] [ns_1@127.0.0.1:<0.15211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:48] [ns_1@127.0.0.1:<0.15281.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:49] [ns_1@127.0.0.1:<0.15244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:49] [ns_1@127.0.0.1:<0.15296.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:49] [ns_1@127.0.0.1:<0.15219.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:50] [ns_1@127.0.0.1:<0.15221.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15270.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15304.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:13:50] [ns_1@127.0.0.1:<0.15296.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:50] [ns_1@127.0.0.1:<0.15292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:13:51] [ns_1@127.0.0.1:<0.15257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:51] [ns_1@127.0.0.1:<0.15273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:51] [ns_1@127.0.0.1:<0.15285.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:51] [ns_1@127.0.0.1:<0.15296.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:51] [ns_1@127.0.0.1:<0.15233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:52] [ns_1@127.0.0.1:<0.15237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:52] [ns_1@127.0.0.1:<0.15296.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:52] [ns_1@127.0.0.1:<0.15312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:53] [ns_1@127.0.0.1:<0.15299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:13:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15296.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:13:53] [ns_1@127.0.0.1:<0.15250.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:54] [ns_1@127.0.0.1:<0.15252.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:54] [ns_1@127.0.0.1:<0.15314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:55] [ns_1@127.0.0.1:<0.15321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:13:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15304.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:13:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15343.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:13:55] [ns_1@127.0.0.1:<0.15262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:56] [ns_1@127.0.0.1:<0.15264.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:56] [ns_1@127.0.0.1:<0.15316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:57] [ns_1@127.0.0.1:<0.15334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:57] [ns_1@127.0.0.1:<0.15277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:58] [ns_1@127.0.0.1:<0.15279.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:58] [ns_1@127.0.0.1:<0.15328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:13:59] [ns_1@127.0.0.1:<0.15346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:13:59] [ns_1@127.0.0.1:<0.15370.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:13:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753236,259400}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38006816}, {processes,10100000}, {processes_used,8474520}, {system,27906816}, {atom,1306681}, {atom_used,1284164}, {binary,349752}, {code,12859877}, {ets,2410304}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4257}, {memory_data,{4040077312,4013719552,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25492 kB\nBuffers: 61152 kB\nCached: 3529164 kB\nSwapCached: 0 kB\nActive: 310596 kB\nInactive: 3441764 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25492 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 162048 kB\nMapped: 24872 kB\nSlab: 134348 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580416 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, 
{free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613863936}, {buffered_memory,62619648}, {free_memory,26103808}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4252997,1}}, {context_switches,{1327278,0}}, {garbage_collection,{700199,961096306,0}}, {io,{{input,24203328},{output,49482381}}}, {reductions,{284031554,607294}}, {run_queue,0}, {runtime,{56150,150}}]}]}] [stats:error] [2012-03-26 2:14:00] [ns_1@127.0.0.1:<0.15288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:00] [ns_1@127.0.0.1:<0.15290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15343.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15379.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:14:00] [ns_1@127.0.0.1:<0.15370.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:01] [ns_1@127.0.0.1:<0.15340.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:01] [ns_1@127.0.0.1:<0.15359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:01] [ns_1@127.0.0.1:<0.15370.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:02] [ns_1@127.0.0.1:<0.15301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:02] [ns_1@127.0.0.1:<0.15307.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:02] [ns_1@127.0.0.1:<0.15370.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:03] [ns_1@127.0.0.1:<0.15355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:03] [ns_1@127.0.0.1:<0.15374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:14:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15370.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, 
list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:14:04] [ns_1@127.0.0.1:<0.15323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:04] [ns_1@127.0.0.1:<0.15326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:05] [ns_1@127.0.0.1:<0.15365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15379.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15410.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:05] [ns_1@127.0.0.1:<0.15390.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:06] [ns_1@127.0.0.1:<0.15336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:06] [ns_1@127.0.0.1:<0.15338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:07] [ns_1@127.0.0.1:<0.15385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:07] [ns_1@127.0.0.1:<0.15403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:08] [ns_1@127.0.0.1:<0.15350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:08] [ns_1@127.0.0.1:<0.15353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:09] [ns_1@127.0.0.1:<0.15398.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:09] [ns_1@127.0.0.1:<0.15416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:09] [ns_1@127.0.0.1:<0.15442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:10] [ns_1@127.0.0.1:<0.15361.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= 
Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15410.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15448.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:10] [ns_1@127.0.0.1:<0.15363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:10] [ns_1@127.0.0.1:<0.15442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:11] [ns_1@127.0.0.1:<0.15411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:11] [ns_1@127.0.0.1:<0.15429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:11] [ns_1@127.0.0.1:<0.15442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:12] [ns_1@127.0.0.1:<0.15376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:12] [ns_1@127.0.0.1:<0.15381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:12] [ns_1@127.0.0.1:<0.15442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:13] [ns_1@127.0.0.1:<0.15425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:13] [ns_1@127.0.0.1:<0.15445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:14:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15442.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:14:14] [ns_1@127.0.0.1:<0.15392.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:14] [ns_1@127.0.0.1:<0.15396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:15] 
[ns_1@127.0.0.1:<0.15436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15448.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15482.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:15] [ns_1@127.0.0.1:<0.15462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:16] [ns_1@127.0.0.1:<0.15405.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:16] [ns_1@127.0.0.1:<0.15407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:17] [ns_1@127.0.0.1:<0.15456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:17] [ns_1@127.0.0.1:<0.15474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:18] [ns_1@127.0.0.1:<0.15421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:18] [ns_1@127.0.0.1:<0.15423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:19] [ns_1@127.0.0.1:<0.15469.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:19] [ns_1@127.0.0.1:<0.15509.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:19] [ns_1@127.0.0.1:<0.15487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:20] [ns_1@127.0.0.1:<0.15431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15482.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15516.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:14:20] [ns_1@127.0.0.1:<0.15509.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:14:20] [ns_1@127.0.0.1:<0.15434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:21] [ns_1@127.0.0.1:<0.15483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:21] [ns_1@127.0.0.1:<0.15509.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:21] [ns_1@127.0.0.1:<0.15500.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:22] [ns_1@127.0.0.1:<0.15449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:22] [ns_1@127.0.0.1:<0.15509.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:22] [ns_1@127.0.0.1:<0.15452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:23] [ns_1@127.0.0.1:<0.15495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:14:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15509.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:14:23] [ns_1@127.0.0.1:<0.15513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:24] [ns_1@127.0.0.1:<0.15464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:24] [ns_1@127.0.0.1:<0.15467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:25] [ns_1@127.0.0.1:<0.15508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15516.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15551.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:25] [ns_1@127.0.0.1:<0.15531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:26] [ns_1@127.0.0.1:<0.15476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:26] [ns_1@127.0.0.1:<0.15479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:27] [ns_1@127.0.0.1:<0.15529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:27] [ns_1@127.0.0.1:<0.15544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:28] [ns_1@127.0.0.1:<0.15491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:28] [ns_1@127.0.0.1:<0.15493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:29] [ns_1@127.0.0.1:<0.15542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:29] [ns_1@127.0.0.1:<0.15577.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:29] [ns_1@127.0.0.1:<0.15558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:30] [ns_1@127.0.0.1:<0.15502.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15551.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15586.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:14:30] [ns_1@127.0.0.1:<0.15577.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:30] [ns_1@127.0.0.1:<0.15504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:31] [ns_1@127.0.0.1:<0.15554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:31] [ns_1@127.0.0.1:<0.15577.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:31] [ns_1@127.0.0.1:<0.15569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:32] [ns_1@127.0.0.1:<0.15518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:32] [ns_1@127.0.0.1:<0.15577.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:32] [ns_1@127.0.0.1:<0.15524.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:33] [ns_1@127.0.0.1:<0.15567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:14:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15577.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:14:33] [ns_1@127.0.0.1:<0.15583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:34] [ns_1@127.0.0.1:<0.15534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:34] [ns_1@127.0.0.1:<0.15536.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:35] [ns_1@127.0.0.1:<0.15581.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15586.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:36] [ns_1@127.0.0.1:<0.15599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:36] [ns_1@127.0.0.1:<0.15546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:37] [ns_1@127.0.0.1:<0.15548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:37] [ns_1@127.0.0.1:<0.15597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:38] [ns_1@127.0.0.1:<0.15612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:38] [ns_1@127.0.0.1:<0.15561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:39] [ns_1@127.0.0.1:<0.15563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:39] [ns_1@127.0.0.1:<0.15610.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:39] [ns_1@127.0.0.1:<0.15647.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:40] [ns_1@127.0.0.1:<0.15628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15653.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:40] [ns_1@127.0.0.1:<0.15571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:40] [ns_1@127.0.0.1:<0.15647.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:41] [ns_1@127.0.0.1:<0.15573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:41] [ns_1@127.0.0.1:<0.15623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:41] [ns_1@127.0.0.1:<0.15647.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:42] [ns_1@127.0.0.1:<0.15638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:42] [ns_1@127.0.0.1:<0.15588.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:42] [ns_1@127.0.0.1:<0.15647.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:43] [ns_1@127.0.0.1:<0.15592.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:43] [ns_1@127.0.0.1:<0.15636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:14:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15647.1> registered_name: [] 
exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:14:44] [ns_1@127.0.0.1:<0.15654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:44] [ns_1@127.0.0.1:<0.15603.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:45] [ns_1@127.0.0.1:<0.15605.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15653.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15687.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:45] [ns_1@127.0.0.1:<0.15650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:46] [ns_1@127.0.0.1:<0.15669.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:46] [ns_1@127.0.0.1:<0.15614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:47] [ns_1@127.0.0.1:<0.15616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:47] [ns_1@127.0.0.1:<0.15667.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:48] [ns_1@127.0.0.1:<0.15681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:48] [ns_1@127.0.0.1:<0.15630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:49] [ns_1@127.0.0.1:<0.15632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:49] [ns_1@127.0.0.1:<0.15679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:49] [ns_1@127.0.0.1:<0.15715.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:50] [ns_1@127.0.0.1:<0.15696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:50] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15687.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15721.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:50] [ns_1@127.0.0.1:<0.15641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:50] [ns_1@127.0.0.1:<0.15715.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:51] [ns_1@127.0.0.1:<0.15643.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:51] [ns_1@127.0.0.1:<0.15692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:51] [ns_1@127.0.0.1:<0.15705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:51] [ns_1@127.0.0.1:<0.15718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:51] [ns_1@127.0.0.1:<0.15715.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:52] [ns_1@127.0.0.1:<0.15707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:52] [ns_1@127.0.0.1:<0.15657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:52] [ns_1@127.0.0.1:<0.15715.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:53] [ns_1@127.0.0.1:<0.15661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:53] [ns_1@127.0.0.1:<0.15740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:14:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15715.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: 
false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:14:54] [ns_1@127.0.0.1:<0.15722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:54] [ns_1@127.0.0.1:<0.15672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:55] [ns_1@127.0.0.1:<0.15674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:14:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15721.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:14:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15760.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:14:55] [ns_1@127.0.0.1:<0.15753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:56] [ns_1@127.0.0.1:<0.15742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:56] [ns_1@127.0.0.1:<0.15684.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:57] [ns_1@127.0.0.1:<0.15688.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:57] [ns_1@127.0.0.1:<0.15767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:58] [ns_1@127.0.0.1:<0.15755.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:58] [ns_1@127.0.0.1:<0.15698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:14:59] [ns_1@127.0.0.1:<0.15700.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:14:59] [ns_1@127.0.0.1:<0.15800.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:14:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753296,287332}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37836064}, {processes,9905320}, {processes_used,8279840}, {system,27930744}, {atom,1306681}, {atom_used,1284164}, {binary,337984}, {code,12859877}, {ets,2439648}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, 
{couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4318}, {memory_data,{4040077312,4013973504,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27228 kB\nBuffers: 61200 kB\nCached: 3529332 kB\nSwapCached: 0 kB\nActive: 309048 kB\nInactive: 3441828 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27228 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 48 kB\nWriteback: 0 kB\nAnonPages: 160412 kB\nMapped: 24872 kB\nSlab: 134360 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577712 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614035968}, {buffered_memory,62668800}, {free_memory,27881472}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4313023,0}}, {context_switches,{1340346,0}}, {garbage_collection,{707324,972379106,0}}, {io,{{input,24233944},{output,49927311}}}, {reductions,{286635825,620761}}, {run_queue,0}, {runtime,{56770,170}}]}]}] [stats:error] [2012-03-26 2:14:59] [ns_1@127.0.0.1:<0.15778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:00] [ns_1@127.0.0.1:<0.15770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15760.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:15:00] [ns_1@127.0.0.1:<0.15800.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:00] [ns_1@127.0.0.1:<0.15709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:01] [ns_1@127.0.0.1:<0.15711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:01] [ns_1@127.0.0.1:<0.15800.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:01] [ns_1@127.0.0.1:<0.15806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:02] 
[ns_1@127.0.0.1:<0.15780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:02] [ns_1@127.0.0.1:<0.15800.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:02] [ns_1@127.0.0.1:<0.15726.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:03] [ns_1@127.0.0.1:<0.15731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:15:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15800.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:15:03] [ns_1@127.0.0.1:<0.15822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:04] [ns_1@127.0.0.1:<0.15811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:04] [ns_1@127.0.0.1:<0.15745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:05] [ns_1@127.0.0.1:<0.15733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15842.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:05] [ns_1@127.0.0.1:<0.15835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:06] [ns_1@127.0.0.1:<0.15826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:06] [ns_1@127.0.0.1:<0.15757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:07] 
[ns_1@127.0.0.1:<0.15735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:07] [ns_1@127.0.0.1:<0.15851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:08] [ns_1@127.0.0.1:<0.15837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:09] [ns_1@127.0.0.1:<0.15772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:09] [ns_1@127.0.0.1:<0.15749.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:09] [ns_1@127.0.0.1:<0.15872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:10] [ns_1@127.0.0.1:<0.15861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:10] [ns_1@127.0.0.1:<0.15853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15842.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:15:10] [ns_1@127.0.0.1:<0.15872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:11] [ns_1@127.0.0.1:<0.15782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:11] [ns_1@127.0.0.1:<0.15763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:11] [ns_1@127.0.0.1:<0.15872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:12] [ns_1@127.0.0.1:<0.15877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:12] [ns_1@127.0.0.1:<0.15864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:12] [ns_1@127.0.0.1:<0.15872.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:13] [ns_1@127.0.0.1:<0.15815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:13] [ns_1@127.0.0.1:<0.15776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] 
[2012-03-26 2:15:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15872.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:15:14] [ns_1@127.0.0.1:<0.15894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:14] [ns_1@127.0.0.1:<0.15882.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:15] [ns_1@127.0.0.1:<0.15828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:15] [ns_1@127.0.0.1:<0.15804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:16] [ns_1@127.0.0.1:<0.15906.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:16] [ns_1@127.0.0.1:<0.15897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:17] [ns_1@127.0.0.1:<0.15839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:17] [ns_1@127.0.0.1:<0.15820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:18] [ns_1@127.0.0.1:<0.15921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:18] [ns_1@127.0.0.1:<0.15909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:19] [ns_1@127.0.0.1:<0.15855.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:19] [ns_1@127.0.0.1:<0.15833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:19] [ns_1@127.0.0.1:<0.15940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:15:20] [ns_1@127.0.0.1:<0.15932.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15946.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:20] [ns_1@127.0.0.1:<0.15923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:20] [ns_1@127.0.0.1:<0.15940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:21] [ns_1@127.0.0.1:<0.15866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:21] [ns_1@127.0.0.1:<0.15846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:21] [ns_1@127.0.0.1:<0.15940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:22] [ns_1@127.0.0.1:<0.15947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:22] [ns_1@127.0.0.1:<0.15934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:22] [ns_1@127.0.0.1:<0.15940.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:23] [ns_1@127.0.0.1:<0.15886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:23] [ns_1@127.0.0.1:<0.15859.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:15:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.15940.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] 
[2012-03-26 2:15:24] [ns_1@127.0.0.1:<0.15963.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:24] [ns_1@127.0.0.1:<0.15951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:25] [ns_1@127.0.0.1:<0.15899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15946.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.15981.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:25] [ns_1@127.0.0.1:<0.15875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:26] [ns_1@127.0.0.1:<0.15976.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:26] [ns_1@127.0.0.1:<0.15966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:27] [ns_1@127.0.0.1:<0.15913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:27] [ns_1@127.0.0.1:<0.15892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:28] [ns_1@127.0.0.1:<0.15990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:28] [ns_1@127.0.0.1:<0.15978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:29] [ns_1@127.0.0.1:<0.15925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:29] [ns_1@127.0.0.1:<0.16007.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:29] [ns_1@127.0.0.1:<0.15904.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:30] [ns_1@127.0.0.1:<0.16001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.15981.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.16016.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:15:30] [ns_1@127.0.0.1:<0.16007.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:30] [ns_1@127.0.0.1:<0.15993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:31] [ns_1@127.0.0.1:<0.15936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:31] [ns_1@127.0.0.1:<0.16007.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:31] [ns_1@127.0.0.1:<0.15917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:32] [ns_1@127.0.0.1:<0.16018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:32] [ns_1@127.0.0.1:<0.16007.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:32] [ns_1@127.0.0.1:<0.16003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:33] [ns_1@127.0.0.1:<0.15956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:15:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16007.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:15:33] [ns_1@127.0.0.1:<0.15930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:34] [ns_1@127.0.0.1:<0.16033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:34] [ns_1@127.0.0.1:<0.16022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:35] [ns_1@127.0.0.1:<0.15968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16016.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16049.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:35] [ns_1@127.0.0.1:<0.15943.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:36] [ns_1@127.0.0.1:<0.16044.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:36] [ns_1@127.0.0.1:<0.16035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:37] [ns_1@127.0.0.1:<0.15982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:37] [ns_1@127.0.0.1:<0.15961.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:38] [ns_1@127.0.0.1:<0.16060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:38] [ns_1@127.0.0.1:<0.16046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:39] [ns_1@127.0.0.1:<0.15997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:39] [ns_1@127.0.0.1:<0.16077.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:39] [ns_1@127.0.0.1:<0.15974.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:40] [ns_1@127.0.0.1:<0.16071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16049.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16085.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:15:40] [ns_1@127.0.0.1:<0.16077.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:40] [ns_1@127.0.0.1:<0.16062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:41] [ns_1@127.0.0.1:<0.16010.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:41] [ns_1@127.0.0.1:<0.16077.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:41] [ns_1@127.0.0.1:<0.15986.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:42] [ns_1@127.0.0.1:<0.16087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:42] [ns_1@127.0.0.1:<0.16077.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:42] [ns_1@127.0.0.1:<0.16073.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:43] [ns_1@127.0.0.1:<0.16027.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:15:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16077.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:15:44] [ns_1@127.0.0.1:<0.15999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:44] [ns_1@127.0.0.1:<0.16102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:45] [ns_1@127.0.0.1:<0.16091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:45] [ns_1@127.0.0.1:<0.16040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16085.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16119.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:46] [ns_1@127.0.0.1:<0.16013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:46] [ns_1@127.0.0.1:<0.16114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:47] [ns_1@127.0.0.1:<0.16104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:47] [ns_1@127.0.0.1:<0.16053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:48] [ns_1@127.0.0.1:<0.16029.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:48] [ns_1@127.0.0.1:<0.16128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:49] [ns_1@127.0.0.1:<0.16116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:49] [ns_1@127.0.0.1:<0.16066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:49] [ns_1@127.0.0.1:<0.16145.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:50] [ns_1@127.0.0.1:<0.16042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16119.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16151.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:50] [ns_1@127.0.0.1:<0.16139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:50] [ns_1@127.0.0.1:<0.16145.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:51] [ns_1@127.0.0.1:<0.16130.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:51] [ns_1@127.0.0.1:<0.16080.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:51] [ns_1@127.0.0.1:<0.16145.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:52] [ns_1@127.0.0.1:<0.16058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:52] [ns_1@127.0.0.1:<0.16068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:52] [ns_1@127.0.0.1:<0.16082.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:52] [ns_1@127.0.0.1:<0.16099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:52] [ns_1@127.0.0.1:<0.16111.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:52] [ns_1@127.0.0.1:<0.16156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:52] 
[ns_1@127.0.0.1:<0.16145.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:53] [ns_1@127.0.0.1:<0.16141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:53] [ns_1@127.0.0.1:<0.16097.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:15:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16145.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:15:54] [ns_1@127.0.0.1:<0.16126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:54] [ns_1@127.0.0.1:<0.16179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:55] [ns_1@127.0.0.1:<0.16161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:15:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16151.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:15:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16194.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:15:55] [ns_1@127.0.0.1:<0.16109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:56] [ns_1@127.0.0.1:<0.16137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:56] [ns_1@127.0.0.1:<0.16191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:57] [ns_1@127.0.0.1:<0.16181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:57] [ns_1@127.0.0.1:<0.16122.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:58] 
[ns_1@127.0.0.1:<0.16152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:58] [ns_1@127.0.0.1:<0.16206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:59] [ns_1@127.0.0.1:<0.16195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:15:59] [ns_1@127.0.0.1:<0.16135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:15:59] [ns_1@127.0.0.1:<0.16223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:15:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753356,314257}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,37797672}, {processes,9886312}, {processes_used,8260832}, {system,27911360}, {atom,1306681}, {atom_used,1284164}, {binary,338872}, {code,12859877}, {ets,2413344}]}, {system_stats, [{cpu_utilization_rate,25.19083969465649}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4378}, {memory_data,{4040077312,4012195840,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26732 kB\nBuffers: 61316 kB\nCached: 3529032 kB\nSwapCached: 0 kB\nActive: 308996 kB\nInactive: 3441744 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26732 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160388 kB\nMapped: 24872 kB\nSlab: 134340 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577516 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613728768}, {buffered_memory,62787584}, {free_memory,27373568}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4373050,0}}, {context_switches,{1354059,0}}, {garbage_collection,{715055,983876703,0}}, {io,{{input,24510261},{output,50961417}}}, {reductions,{289654667,640676}}, {run_queue,0}, {runtime,{57430,150}}]}]}] [stats:error] [2012-03-26 2:16:00] [ns_1@127.0.0.1:<0.16176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: 
child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16194.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16230.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:16:00] [ns_1@127.0.0.1:<0.16216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:00] [ns_1@127.0.0.1:<0.16223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:01] [ns_1@127.0.0.1:<0.16208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:01] [ns_1@127.0.0.1:<0.16223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:01] [ns_1@127.0.0.1:<0.16148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:02] [ns_1@127.0.0.1:<0.16189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:02] [ns_1@127.0.0.1:<0.16223.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:02] [ns_1@127.0.0.1:<0.16234.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:03] [ns_1@127.0.0.1:<0.16219.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:16:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16223.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:16:03] [ns_1@127.0.0.1:<0.16166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:04] [ns_1@127.0.0.1:<0.16203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:04] [ns_1@127.0.0.1:<0.16249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:05] [ns_1@127.0.0.1:<0.16241.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16230.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16263.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:16:05] [ns_1@127.0.0.1:<0.16168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:06] [ns_1@127.0.0.1:<0.16214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:06] [ns_1@127.0.0.1:<0.16260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:07] [ns_1@127.0.0.1:<0.16254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:07] [ns_1@127.0.0.1:<0.16170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:08] [ns_1@127.0.0.1:<0.16231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:08] [ns_1@127.0.0.1:<0.16276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:09] [ns_1@127.0.0.1:<0.16267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:09] [ns_1@127.0.0.1:<0.16293.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:09] [ns_1@127.0.0.1:<0.16172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:10] [ns_1@127.0.0.1:<0.16246.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16263.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16301.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:16:10] [ns_1@127.0.0.1:<0.16293.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:10] 
[ns_1@127.0.0.1:<0.16287.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:11] [ns_1@127.0.0.1:<0.16280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:11] [ns_1@127.0.0.1:<0.16293.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:11] [ns_1@127.0.0.1:<0.16174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:12] [ns_1@127.0.0.1:<0.16258.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:12] [ns_1@127.0.0.1:<0.16293.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:12] [ns_1@127.0.0.1:<0.16307.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:13] [ns_1@127.0.0.1:<0.16296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:16:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16293.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:16:13] [ns_1@127.0.0.1:<0.16187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:14] [ns_1@127.0.0.1:<0.16274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:14] [ns_1@127.0.0.1:<0.16320.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:15] [ns_1@127.0.0.1:<0.16313.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16301.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16335.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:16:15] [ns_1@127.0.0.1:<0.16199.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:16] [ns_1@127.0.0.1:<0.16285.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:17] [ns_1@127.0.0.1:<0.16332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:17] [ns_1@127.0.0.1:<0.16325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:18] [ns_1@127.0.0.1:<0.16212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:18] [ns_1@127.0.0.1:<0.16303.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:19] [ns_1@127.0.0.1:<0.16346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:19] [ns_1@127.0.0.1:<0.16338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:19] [ns_1@127.0.0.1:<0.16361.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:20] [ns_1@127.0.0.1:<0.16227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:20] [ns_1@127.0.0.1:<0.16318.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16335.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16369.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:16:20] [ns_1@127.0.0.1:<0.16361.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:21] [ns_1@127.0.0.1:<0.16357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:21] [ns_1@127.0.0.1:<0.16351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:21] [ns_1@127.0.0.1:<0.16361.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:22] [ns_1@127.0.0.1:<0.16243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:22] [ns_1@127.0.0.1:<0.16330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:22] [ns_1@127.0.0.1:<0.16361.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:23] [ns_1@127.0.0.1:<0.16377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:23] [ns_1@127.0.0.1:<0.16364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:16:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16361.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:16:24] [ns_1@127.0.0.1:<0.16256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:24] [ns_1@127.0.0.1:<0.16344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:25] [ns_1@127.0.0.1:<0.16389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16369.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16402.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:16:25] [ns_1@127.0.0.1:<0.16382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:26] [ns_1@127.0.0.1:<0.16272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:26] [ns_1@127.0.0.1:<0.16355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:27] [ns_1@127.0.0.1:<0.16403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:27] [ns_1@127.0.0.1:<0.16395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:28] [ns_1@127.0.0.1:<0.16282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
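Illustrative sketch, not part of the captured log: every CRASH REPORT and menelaus_sup SUPERVISOR REPORT above shares one exit reason. gen_server:call/3 targets the registered name 'ns_memcached-default', which is not running on 'ns_1@127.0.0.1' at the time of the call, so the call exits with {noproc,{gen_server,call,[...]}}. A minimal Erlang sketch of that failure mode, assuming an attached shell on the node:

    %% whereis/1 returns undefined while the per-bucket memcached
    %% gen_server is not registered; calling it then exits with
    %% {noproc,{gen_server,call,[Name,Request,Timeout]}}, which is
    %% the reason recorded in the reports above.
    case whereis('ns_memcached-default') of
        undefined ->
            %% e.g. gen_server:call('ns_memcached-default', topkeys, 30000)
            %% or   gen_server:call({'ns_memcached-default','ns_1@127.0.0.1'},
            %%                      list_vbuckets_prevstate, 30000)
            not_running;
        Pid when is_pid(Pid) ->
            {running, Pid}
    end.

This is consistent with the pattern in the log: hot_keys_keeper and each ns_janitor:cleanup/2 run keep exiting with noproc and being restarted until the bucket's ns_memcached process comes up.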
[stats:error] [2012-03-26 2:16:28] [ns_1@127.0.0.1:<0.16372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:29] [ns_1@127.0.0.1:<0.16416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:29] [ns_1@127.0.0.1:<0.16407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:29] [ns_1@127.0.0.1:<0.16430.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:30] [ns_1@127.0.0.1:<0.16298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16402.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16437.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:16:30] [ns_1@127.0.0.1:<0.16387.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:30] [ns_1@127.0.0.1:<0.16430.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:31] [ns_1@127.0.0.1:<0.16426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:31] [ns_1@127.0.0.1:<0.16420.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:31] [ns_1@127.0.0.1:<0.16430.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:32] [ns_1@127.0.0.1:<0.16315.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:32] [ns_1@127.0.0.1:<0.16399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:32] [ns_1@127.0.0.1:<0.16430.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:33] [ns_1@127.0.0.1:<0.16445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:33] [ns_1@127.0.0.1:<0.16434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:16:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16430.1> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:16:34] [ns_1@127.0.0.1:<0.16327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:34] [ns_1@127.0.0.1:<0.16414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:35] [ns_1@127.0.0.1:<0.16458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16437.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16470.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:16:35] [ns_1@127.0.0.1:<0.16450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:36] [ns_1@127.0.0.1:<0.16342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:36] [ns_1@127.0.0.1:<0.16424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:37] [ns_1@127.0.0.1:<0.16473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:37] [ns_1@127.0.0.1:<0.16463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:38] [ns_1@127.0.0.1:<0.16353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:38] [ns_1@127.0.0.1:<0.16441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:39] [ns_1@127.0.0.1:<0.16487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:39] [ns_1@127.0.0.1:<0.16498.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:39] [ns_1@127.0.0.1:<0.16476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:40] [ns_1@127.0.0.1:<0.16366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16470.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16506.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:16:40] [ns_1@127.0.0.1:<0.16498.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:40] [ns_1@127.0.0.1:<0.16456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:41] [ns_1@127.0.0.1:<0.16501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:41] [ns_1@127.0.0.1:<0.16498.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:41] [ns_1@127.0.0.1:<0.16489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:42] [ns_1@127.0.0.1:<0.16384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:42] [ns_1@127.0.0.1:<0.16498.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:42] [ns_1@127.0.0.1:<0.16467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:43] [ns_1@127.0.0.1:<0.16518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:16:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16498.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:16:43] [ns_1@127.0.0.1:<0.16503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:44] [ns_1@127.0.0.1:<0.16397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:44] [ns_1@127.0.0.1:<0.16483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:45] [ns_1@127.0.0.1:<0.16530.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16506.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:16:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16540.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:16:45] [ns_1@127.0.0.1:<0.16520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:46] [ns_1@127.0.0.1:<0.16411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:46] [ns_1@127.0.0.1:<0.16494.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:47] [ns_1@127.0.0.1:<0.16543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:47] [ns_1@127.0.0.1:<0.16532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:48] [ns_1@127.0.0.1:<0.16422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:48] [ns_1@127.0.0.1:<0.16512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:49] [ns_1@127.0.0.1:<0.16556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:49] [ns_1@127.0.0.1:<0.16566.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:49] [ns_1@127.0.0.1:<0.16547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:50] [ns_1@127.0.0.1:<0.16438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:16:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16540.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:16:50] [ns_1@127.0.0.1:<0.16566.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 2:16:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16574.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:16:51] [ns_1@127.0.0.1:<0.16566.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:52] [ns_1@127.0.0.1:<0.16558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:52] [ns_1@127.0.0.1:<0.16452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:52] [ns_1@127.0.0.1:<0.16492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:52] [ns_1@127.0.0.1:<0.16508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:52] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 2:16:52] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:16:52] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:16:52] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:16:52] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:16:52] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:warn] [2012-03-26 2:16:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
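Illustrative reconstruction, not taken from the ns_server source: the Offender and started entries that menelaus_sup keeps printing describe an ordinary OTP child specification. In the old tuple form, using only the fields shown in the reports (the trailing Modules list is an assumption), it would look roughly like:

    %% {Id, StartFunc, Restart, Shutdown, Type, Modules}
    {hot_keys_keeper,
     {hot_keys_keeper, start_link, []},
     permanent,          %% restart_type: always restarted after child_terminated
     5000,               %% shutdown: 5000 ms
     worker,
     [hot_keys_keeper]}. %% Modules list assumed, not shown in the log

Because the child is permanent, each topkeys noproc crash is followed a few seconds later by a fresh PROGRESS REPORT with a new pid, which is the restart loop visible before and after this point.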
[ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:<0.16523.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:<0.16585.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:<0.16622.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 2:16:57] [ns_1@127.0.0.1:<0.16587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:16:57] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:16:59: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] 
[2012-03-26 2:16:58] [ns_1@127.0.0.1:<0.16590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:58] [ns_1@127.0.0.1:<0.16622.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:58] [ns_1@127.0.0.1:<0.16535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:59] [ns_1@127.0.0.1:<0.16525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:59] [ns_1@127.0.0.1:<0.16622.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:16:59] [ns_1@127.0.0.1:<0.16634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:16:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.16622.1>} [ns_doctor:info] [2012-03-26 2:16:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753416,341412}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38131944}, {processes,10180608}, {processes_used,8558048}, {system,27951336}, {atom,1306681}, {atom_used,1284164}, {binary,341672}, {code,12859877}, {ets,2443232}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4438}, {memory_data,{4040077312,4012703744,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26724 kB\nBuffers: 61364 kB\nCached: 3529220 kB\nSwapCached: 0 kB\nActive: 309096 kB\nInactive: 3441864 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26724 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 16 kB\nAnonPages: 160396 kB\nMapped: 24872 kB\nSlab: 134356 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 577516 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613921280}, {buffered_memory,62836736}, {free_memory,27365376}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4433077,0}}, {context_switches,{1367083,0}}, {garbage_collection,{722057,995074699,0}}, {io,{{input,24540856},{output,51406660}}}, {reductions,{292227681,586717}}, {run_queue,0}, {runtime,{58090,170}}]}]}] [stats:error] [2012-03-26 2:17:00] 
[ns_1@127.0.0.1:<0.16593.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16574.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16651.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:17:00] [ns_1@127.0.0.1:<0.16622.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:00] [ns_1@127.0.0.1:<0.16549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:01] [ns_1@127.0.0.1:<0.16537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:01] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:17:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16622.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:17:01] [ns_1@127.0.0.1:<0.16648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:02] [ns_1@127.0.0.1:<0.16595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:02] [ns_1@127.0.0.1:<0.16560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:03] [ns_1@127.0.0.1:<0.16551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:03] [ns_1@127.0.0.1:<0.16664.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:04] [ns_1@127.0.0.1:<0.16597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:04] [ns_1@127.0.0.1:<0.16639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:05] 
[ns_1@127.0.0.1:<0.16562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16651.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16682.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:17:05] [ns_1@127.0.0.1:<0.16675.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:06] [ns_1@127.0.0.1:<0.16637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:06] [ns_1@127.0.0.1:<0.16655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:07] [ns_1@127.0.0.1:<0.16615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:07] [ns_1@127.0.0.1:<0.16688.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:08] [ns_1@127.0.0.1:<0.16653.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:08] [ns_1@127.0.0.1:<0.16669.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:09] [ns_1@127.0.0.1:<0.16616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:09] [ns_1@127.0.0.1:<0.16712.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:09] [ns_1@127.0.0.1:<0.16701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:10] [ns_1@127.0.0.1:<0.16666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16682.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16720.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:17:10] [ns_1@127.0.0.1:<0.16712.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:17:10] [ns_1@127.0.0.1:<0.16679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:11] [ns_1@127.0.0.1:<0.16617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:11] [ns_1@127.0.0.1:<0.16712.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:11] [ns_1@127.0.0.1:<0.16717.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:12] [ns_1@127.0.0.1:<0.16677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:12] [ns_1@127.0.0.1:<0.16712.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:12] [ns_1@127.0.0.1:<0.16695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:13] [ns_1@127.0.0.1:<0.16618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:17:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16712.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:17:13] [ns_1@127.0.0.1:<0.16734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:14] [ns_1@127.0.0.1:<0.16693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:14] [ns_1@127.0.0.1:<0.16706.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:15] [ns_1@127.0.0.1:<0.16626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16720.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16754.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:17:15] [ns_1@127.0.0.1:<0.16746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:16] [ns_1@127.0.0.1:<0.16703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:16] [ns_1@127.0.0.1:<0.16726.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:17] [ns_1@127.0.0.1:<0.16642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:17] [ns_1@127.0.0.1:<0.16761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:18] [ns_1@127.0.0.1:<0.16722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:18] [ns_1@127.0.0.1:<0.16739.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:19] [ns_1@127.0.0.1:<0.16660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:19] [ns_1@127.0.0.1:<0.16780.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:19] [ns_1@127.0.0.1:<0.16772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:20] [ns_1@127.0.0.1:<0.16737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16754.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16788.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:17:20] [ns_1@127.0.0.1:<0.16780.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:21] [ns_1@127.0.0.1:<0.16751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:21] [ns_1@127.0.0.1:<0.16671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:21] [ns_1@127.0.0.1:<0.16780.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:22] [ns_1@127.0.0.1:<0.16785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:22] [ns_1@127.0.0.1:<0.16749.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:22] [ns_1@127.0.0.1:<0.16780.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:23] [ns_1@127.0.0.1:<0.16765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:23] [ns_1@127.0.0.1:<0.16683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:17:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16780.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:17:24] [ns_1@127.0.0.1:<0.16803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:24] [ns_1@127.0.0.1:<0.16763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:25] [ns_1@127.0.0.1:<0.16776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:25] [ns_1@127.0.0.1:<0.16699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16788.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16823.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:17:26] [ns_1@127.0.0.1:<0.16816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:26] [ns_1@127.0.0.1:<0.16774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:27] [ns_1@127.0.0.1:<0.16796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:27] [ns_1@127.0.0.1:<0.16715.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:28] [ns_1@127.0.0.1:<0.16830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:28] [ns_1@127.0.0.1:<0.16791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:29] [ns_1@127.0.0.1:<0.16808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:29] [ns_1@127.0.0.1:<0.16732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:29] [ns_1@127.0.0.1:<0.16849.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:30] [ns_1@127.0.0.1:<0.16841.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16823.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16856.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:17:30] [ns_1@127.0.0.1:<0.16806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:30] [ns_1@127.0.0.1:<0.16849.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:31] [ns_1@127.0.0.1:<0.16820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:31] [ns_1@127.0.0.1:<0.16744.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:31] [ns_1@127.0.0.1:<0.16849.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:32] [ns_1@127.0.0.1:<0.16857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:32] [ns_1@127.0.0.1:<0.16818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:32] [ns_1@127.0.0.1:<0.16849.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:33] [ns_1@127.0.0.1:<0.16835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:33] [ns_1@127.0.0.1:<0.16757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:17:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16849.1> registered_name: [] 
exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:17:34] [ns_1@127.0.0.1:<0.16871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:34] [ns_1@127.0.0.1:<0.16833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:35] [ns_1@127.0.0.1:<0.16845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16856.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16889.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:17:35] [ns_1@127.0.0.1:<0.16770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:36] [ns_1@127.0.0.1:<0.16884.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:36] [ns_1@127.0.0.1:<0.16843.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:37] [ns_1@127.0.0.1:<0.16864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:37] [ns_1@127.0.0.1:<0.16783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:38] [ns_1@127.0.0.1:<0.16900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:38] [ns_1@127.0.0.1:<0.16860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:39] [ns_1@127.0.0.1:<0.16877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:39] [ns_1@127.0.0.1:<0.16917.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:39] [ns_1@127.0.0.1:<0.16801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:40] [ns_1@127.0.0.1:<0.16910.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:40] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16889.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16925.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:17:40] [ns_1@127.0.0.1:<0.16917.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:40] [ns_1@127.0.0.1:<0.16875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:41] [ns_1@127.0.0.1:<0.16890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:41] [ns_1@127.0.0.1:<0.16917.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:41] [ns_1@127.0.0.1:<0.16814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:42] [ns_1@127.0.0.1:<0.16927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:42] [ns_1@127.0.0.1:<0.16917.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:42] [ns_1@127.0.0.1:<0.16886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:43] [ns_1@127.0.0.1:<0.16904.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:17:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16917.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:17:43] [ns_1@127.0.0.1:<0.16826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:44] [ns_1@127.0.0.1:<0.16942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:44] 
[ns_1@127.0.0.1:<0.16902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:45] [ns_1@127.0.0.1:<0.16920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16925.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16959.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:17:45] [ns_1@127.0.0.1:<0.16839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:46] [ns_1@127.0.0.1:<0.16954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:46] [ns_1@127.0.0.1:<0.16913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:47] [ns_1@127.0.0.1:<0.16937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:47] [ns_1@127.0.0.1:<0.16853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:48] [ns_1@127.0.0.1:<0.16968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:48] [ns_1@127.0.0.1:<0.16929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:49] [ns_1@127.0.0.1:<0.16949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:49] [ns_1@127.0.0.1:<0.16985.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:49] [ns_1@127.0.0.1:<0.16869.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:50] [ns_1@127.0.0.1:<0.16979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16959.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.16993.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] 
[2012-03-26 2:17:50] [ns_1@127.0.0.1:<0.16985.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:50] [ns_1@127.0.0.1:<0.16944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:51] [ns_1@127.0.0.1:<0.16962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:51] [ns_1@127.0.0.1:<0.16985.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:51] [ns_1@127.0.0.1:<0.16882.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:52] [ns_1@127.0.0.1:<0.16996.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:52] [ns_1@127.0.0.1:<0.16985.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:53] [ns_1@127.0.0.1:<0.16956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:53] [ns_1@127.0.0.1:<0.16975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:17:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.16985.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:17:54] [ns_1@127.0.0.1:<0.16895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:54] [ns_1@127.0.0.1:<0.17011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:55] [ns_1@127.0.0.1:<0.16970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:55] [ns_1@127.0.0.1:<0.16988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:17:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.16993.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:17:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17028.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:17:56] [ns_1@127.0.0.1:<0.16908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:56] [ns_1@127.0.0.1:<0.17023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:57] [ns_1@127.0.0.1:<0.16981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:57] [ns_1@127.0.0.1:<0.17006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:17:58] [ns_1@127.0.0.1:<0.16922.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:17:59] [ns_1@127.0.0.1:<0.17066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:17:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753476,371309}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38242632}, {processes,10291368}, {processes_used,8665888}, {system,27951264}, {atom,1306681}, {atom_used,1284164}, {binary,365904}, {code,12859877}, {ets,2413040}]}, {system_stats, [{cpu_utilization_rate,25.44080604534005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4498}, {memory_data,{4040077312,4012974080,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26856 kB\nBuffers: 61444 kB\nCached: 3529376 kB\nSwapCached: 0 kB\nActive: 309316 kB\nInactive: 3441912 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26856 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 68 kB\nWriteback: 16 kB\nAnonPages: 160416 kB\nMapped: 24872 kB\nSlab: 134384 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579608 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614081024}, {buffered_memory,62918656}, {free_memory,27500544}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4493107,1}}, {context_switches,{1380276,0}}, {garbage_collection,{729158,1006624552,0}}, {io,{{input,24577303},{output,51887172}}}, 
{reductions,{294837888,616687}}, {run_queue,0}, {runtime,{58880,190}}]}]}] [error_logger:error] [2012-03-26 2:18:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17028.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17071.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:18:00] [ns_1@127.0.0.1:<0.17066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:01] [ns_1@127.0.0.1:<0.17066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:02] [ns_1@127.0.0.1:<0.17066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:18:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17066.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:18:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17071.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17084.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:08] [ns_1@127.0.0.1:<0.16939.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:08] [ns_1@127.0.0.1:<0.17038.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:09] [ns_1@127.0.0.1:<0.17001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:09] [ns_1@127.0.0.1:<0.17019.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:09] [ns_1@127.0.0.1:<0.17106.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:10] [ns_1@127.0.0.1:<0.16951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17084.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17112.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:10] [ns_1@127.0.0.1:<0.17098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:10] [ns_1@127.0.0.1:<0.17106.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:11] [ns_1@127.0.0.1:<0.17013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:11] [ns_1@127.0.0.1:<0.17031.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:11] [ns_1@127.0.0.1:<0.17106.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:12] [ns_1@127.0.0.1:<0.16966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:12] [ns_1@127.0.0.1:<0.17116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:12] [ns_1@127.0.0.1:<0.17106.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:13] [ns_1@127.0.0.1:<0.17025.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:13] [ns_1@127.0.0.1:<0.17044.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:18:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17106.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function 
gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:18:14] [ns_1@127.0.0.1:<0.16977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:14] [ns_1@127.0.0.1:<0.17131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:15] [ns_1@127.0.0.1:<0.17040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17112.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17146.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:15] [ns_1@127.0.0.1:<0.17093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:16] [ns_1@127.0.0.1:<0.16990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:16] [ns_1@127.0.0.1:<0.17143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:17] [ns_1@127.0.0.1:<0.17100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:17] [ns_1@127.0.0.1:<0.17109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:18] [ns_1@127.0.0.1:<0.17008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:18] [ns_1@127.0.0.1:<0.17157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:19] [ns_1@127.0.0.1:<0.17120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:19] [ns_1@127.0.0.1:<0.17172.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:19] [ns_1@127.0.0.1:<0.17126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:20] [ns_1@127.0.0.1:<0.17021.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: 
child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17146.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17180.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:18:20] [ns_1@127.0.0.1:<0.17172.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:20] [ns_1@127.0.0.1:<0.17168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:21] [ns_1@127.0.0.1:<0.17133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:21] [ns_1@127.0.0.1:<0.17172.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:21] [ns_1@127.0.0.1:<0.17138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:22] [ns_1@127.0.0.1:<0.17035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:22] [ns_1@127.0.0.1:<0.17172.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:22] [ns_1@127.0.0.1:<0.17185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:23] [ns_1@127.0.0.1:<0.17147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:18:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17172.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:18:23] [ns_1@127.0.0.1:<0.17151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:24] [ns_1@127.0.0.1:<0.17095.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:24] [ns_1@127.0.0.1:<0.17200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:25] [ns_1@127.0.0.1:<0.17159.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17180.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17215.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:25] [ns_1@127.0.0.1:<0.17164.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:26] [ns_1@127.0.0.1:<0.17113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:26] [ns_1@127.0.0.1:<0.17212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:27] [ns_1@127.0.0.1:<0.17175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:27] [ns_1@127.0.0.1:<0.17177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:28] [ns_1@127.0.0.1:<0.17128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:28] [ns_1@127.0.0.1:<0.17227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:29] [ns_1@127.0.0.1:<0.17193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:29] [ns_1@127.0.0.1:<0.17241.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:29] [ns_1@127.0.0.1:<0.17195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:30] [ns_1@127.0.0.1:<0.17140.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17215.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17250.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:18:30] [ns_1@127.0.0.1:<0.17241.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:30] 
[ns_1@127.0.0.1:<0.17237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:31] [ns_1@127.0.0.1:<0.17206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:31] [ns_1@127.0.0.1:<0.17241.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:31] [ns_1@127.0.0.1:<0.17208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:32] [ns_1@127.0.0.1:<0.17155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:32] [ns_1@127.0.0.1:<0.17241.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:32] [ns_1@127.0.0.1:<0.17256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:33] [ns_1@127.0.0.1:<0.17218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:18:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17241.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:18:34] [ns_1@127.0.0.1:<0.17220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:34] [ns_1@127.0.0.1:<0.17166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:35] [ns_1@127.0.0.1:<0.17269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:35] [ns_1@127.0.0.1:<0.17231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17250.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17283.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:36] [ns_1@127.0.0.1:<0.17233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:36] [ns_1@127.0.0.1:<0.17182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:37] [ns_1@127.0.0.1:<0.17280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:37] [ns_1@127.0.0.1:<0.17245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:38] [ns_1@127.0.0.1:<0.17247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:38] [ns_1@127.0.0.1:<0.17198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:39] [ns_1@127.0.0.1:<0.17296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:39] [ns_1@127.0.0.1:<0.17261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:39] [ns_1@127.0.0.1:<0.17311.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:40] [ns_1@127.0.0.1:<0.17263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17283.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17318.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:40] [ns_1@127.0.0.1:<0.17210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:40] [ns_1@127.0.0.1:<0.17311.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:41] [ns_1@127.0.0.1:<0.17307.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:41] [ns_1@127.0.0.1:<0.17274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:41] [ns_1@127.0.0.1:<0.17311.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:42] [ns_1@127.0.0.1:<0.17276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:42] [ns_1@127.0.0.1:<0.17225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:42] [ns_1@127.0.0.1:<0.17311.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:43] [ns_1@127.0.0.1:<0.17325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:43] [ns_1@127.0.0.1:<0.17287.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:18:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17311.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:18:44] [ns_1@127.0.0.1:<0.17292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:44] [ns_1@127.0.0.1:<0.17235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:45] [ns_1@127.0.0.1:<0.17338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17318.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17351.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:45] [ns_1@127.0.0.1:<0.17300.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:46] [ns_1@127.0.0.1:<0.17302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:46] [ns_1@127.0.0.1:<0.17252.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:47] [ns_1@127.0.0.1:<0.17352.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:47] [ns_1@127.0.0.1:<0.17314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:48] [ns_1@127.0.0.1:<0.17316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
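Every SUPERVISOR REPORT and CRASH REPORT in this stretch of the log carries the same exit reason, {noproc,{gen_server,call,[...]}}: OTP's gen_server:call/3 exits the calling process with noproc when the registered name it targets ('ns_memcached-default' here) has no live process behind it, so hot_keys_keeper dies on its topkeys call, each janitor run dies on list_vbuckets_prevstate, and both are restarted every few seconds. The Erlang sketch below is illustrative only and is not ns_server code; the module name noproc_demo and its run/0 function are assumptions, and it presumes a plain OTP node where nothing is registered under 'ns_memcached-default'.

%% noproc_demo.erl -- minimal sketch (not ns_server code) reproducing the
%% {noproc,...} exit shape seen in the reports above.
-module(noproc_demo).
-export([run/0]).

run() ->
    %% gen_server:call/3 exits the caller immediately when the target name
    %% is not registered; catching the exit here exposes the reason term,
    %% so the 30000 ms timeout never actually elapses.
    try gen_server:call('ns_memcached-default', topkeys, 30000) of
        Reply ->
            {unexpected_reply, Reply}
    catch
        exit:{noproc, Details} ->
            %% Same tuple shape as the Reason in the SUPERVISOR REPORTs and
            %% the "exception exit" in the ns_janitor CRASH REPORTs.
            {caller_would_exit_with, {noproc, Details}}
    end.

Compiling this and calling noproc_demo:run() from an Erlang shell should yield {caller_would_exit_with,{noproc,{gen_server,call,['ns_memcached-default',topkeys,30000]}}}, matching the Reason logged each time menelaus_sup restarts hot_keys_keeper.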
[stats:error] [2012-03-26 2:18:48] [ns_1@127.0.0.1:<0.17267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:49] [ns_1@127.0.0.1:<0.17364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:49] [ns_1@127.0.0.1:<0.17331.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:49] [ns_1@127.0.0.1:<0.17379.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:50] [ns_1@127.0.0.1:<0.17333.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17351.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17385.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:50] [ns_1@127.0.0.1:<0.17278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:50] [ns_1@127.0.0.1:<0.17379.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:51] [ns_1@127.0.0.1:<0.17375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:51] [ns_1@127.0.0.1:<0.17343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:51] [ns_1@127.0.0.1:<0.17379.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:52] [ns_1@127.0.0.1:<0.17345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:52] [ns_1@127.0.0.1:<0.17379.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:52] [ns_1@127.0.0.1:<0.17294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:53] [ns_1@127.0.0.1:<0.17395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:18:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17379.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:18:53] [ns_1@127.0.0.1:<0.17356.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:54] [ns_1@127.0.0.1:<0.17360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:54] [ns_1@127.0.0.1:<0.17305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:55] [ns_1@127.0.0.1:<0.17410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:18:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17385.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:18:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17420.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:18:55] [ns_1@127.0.0.1:<0.17369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:56] [ns_1@127.0.0.1:<0.17371.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:56] [ns_1@127.0.0.1:<0.17321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:57] [ns_1@127.0.0.1:<0.17423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:57] [ns_1@127.0.0.1:<0.17382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:58] [ns_1@127.0.0.1:<0.17386.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:58] [ns_1@127.0.0.1:<0.17336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:18:59] [ns_1@127.0.0.1:<0.17436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:18:59] [ns_1@127.0.0.1:<0.17447.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:18:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753536,407361}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, 
[{total,38230232}, {processes,10236808}, {processes_used,8611328}, {system,27993424}, {atom,1306681}, {atom_used,1284164}, {binary,370848}, {code,12859877}, {ets,2444408}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4558}, {memory_data,{4040077312,4012576768,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26732 kB\nBuffers: 61552 kB\nCached: 3529500 kB\nSwapCached: 0 kB\nActive: 309332 kB\nInactive: 3442132 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26732 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160420 kB\nMapped: 24872 kB\nSlab: 134344 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579608 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614208000}, {buffered_memory,63029248}, {free_memory,27373568}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4553144,1}}, {context_switches,{1393190,0}}, {garbage_collection,{736070,1016566682,0}}, {io,{{input,24863371},{output,52528930}}}, {reductions,{297260409,621995}}, {run_queue,0}, {runtime,{59620,190}}]}]}] [stats:error] [2012-03-26 2:18:59] [ns_1@127.0.0.1:<0.17400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:00] [ns_1@127.0.0.1:<0.17403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17420.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17456.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:19:00] [ns_1@127.0.0.1:<0.17447.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:00] [ns_1@127.0.0.1:<0.17348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:01] 
[ns_1@127.0.0.1:<0.17447.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:01] [ns_1@127.0.0.1:<0.17451.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:02] [ns_1@127.0.0.1:<0.17425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:02] [ns_1@127.0.0.1:<0.17447.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:02] [ns_1@127.0.0.1:<0.17430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:03] [ns_1@127.0.0.1:<0.17432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:19:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17447.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:19:03] [ns_1@127.0.0.1:<0.17413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:04] [ns_1@127.0.0.1:<0.17438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:04] [ns_1@127.0.0.1:<0.17440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:05] [ns_1@127.0.0.1:<0.17442.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17456.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17487.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:19:05] [ns_1@127.0.0.1:<0.17467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:06] 
[ns_1@127.0.0.1:<0.17453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:06] [ns_1@127.0.0.1:<0.17458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:07] [ns_1@127.0.0.1:<0.17465.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:07] [ns_1@127.0.0.1:<0.17480.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:08] [ns_1@127.0.0.1:<0.17415.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:08] [ns_1@127.0.0.1:<0.17471.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:08] [ns_1@127.0.0.1:<0.17482.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:08] [ns_1@127.0.0.1:<0.17362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:09] [ns_1@127.0.0.1:<0.17478.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:09] [ns_1@127.0.0.1:<0.17521.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:09] [ns_1@127.0.0.1:<0.17493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:10] [ns_1@127.0.0.1:<0.17498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17487.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17529.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:19:10] [ns_1@127.0.0.1:<0.17521.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:10] [ns_1@127.0.0.1:<0.17373.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:11] [ns_1@127.0.0.1:<0.17491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:11] [ns_1@127.0.0.1:<0.17521.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:11] [ns_1@127.0.0.1:<0.17506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:12] [ns_1@127.0.0.1:<0.17513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:12] [ns_1@127.0.0.1:<0.17521.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:13] [ns_1@127.0.0.1:<0.17390.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 2:19:13] [ns_1@127.0.0.1:<0.17504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17521.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:19:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17529.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17555.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:19:17] [ns_1@127.0.0.1:<0.17405.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:17] [ns_1@127.0.0.1:<0.17524.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:18] [ns_1@127.0.0.1:<0.17508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:18] [ns_1@127.0.0.1:<0.17531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:19] [ns_1@127.0.0.1:<0.17417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:19] [ns_1@127.0.0.1:<0.17575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:20] [ns_1@127.0.0.1:<0.17567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:20] [ns_1@127.0.0.1:<0.17569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17555.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17583.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:19:20] [ns_1@127.0.0.1:<0.17575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:21] [ns_1@127.0.0.1:<0.17560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:21] [ns_1@127.0.0.1:<0.17564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:21] [ns_1@127.0.0.1:<0.17575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:22] [ns_1@127.0.0.1:<0.17510.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:22] [ns_1@127.0.0.1:<0.17546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:22] [ns_1@127.0.0.1:<0.17575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:23] [ns_1@127.0.0.1:<0.17571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:23] [ns_1@127.0.0.1:<0.17541.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:19:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17575.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:19:24] [ns_1@127.0.0.1:<0.17526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:24] [ns_1@127.0.0.1:<0.17586.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:25] [ns_1@127.0.0.1:<0.17473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:19:25] [ns_1@127.0.0.1:<0.17578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17583.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17618.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:19:26] [ns_1@127.0.0.1:<0.17543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:26] [ns_1@127.0.0.1:<0.17601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:27] [ns_1@127.0.0.1:<0.17484.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:27] [ns_1@127.0.0.1:<0.17596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:28] [ns_1@127.0.0.1:<0.17580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:28] [ns_1@127.0.0.1:<0.17613.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:29] [ns_1@127.0.0.1:<0.17500.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:29] [ns_1@127.0.0.1:<0.17609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:29] [ns_1@127.0.0.1:<0.17644.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:30] [ns_1@127.0.0.1:<0.17598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17618.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17651.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:19:30] [ns_1@127.0.0.1:<0.17628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:30] [ns_1@127.0.0.1:<0.17644.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:19:31] [ns_1@127.0.0.1:<0.17515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:31] [ns_1@127.0.0.1:<0.17621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:31] [ns_1@127.0.0.1:<0.17644.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:32] [ns_1@127.0.0.1:<0.17611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:32] [ns_1@127.0.0.1:<0.17638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:32] [ns_1@127.0.0.1:<0.17644.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:33] [ns_1@127.0.0.1:<0.17535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:33] [ns_1@127.0.0.1:<0.17634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:19:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17644.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:19:34] [ns_1@127.0.0.1:<0.17625.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:34] [ns_1@127.0.0.1:<0.17655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:35] [ns_1@127.0.0.1:<0.17548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17651.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17684.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:19:36] 
[ns_1@127.0.0.1:<0.17670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:37] [ns_1@127.0.0.1:<0.17591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:37] [ns_1@127.0.0.1:<0.17648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:38] [ns_1@127.0.0.1:<0.17636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:38] [ns_1@127.0.0.1:<0.17681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:39] [ns_1@127.0.0.1:<0.17603.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:39] [ns_1@127.0.0.1:<0.17708.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:39] [ns_1@127.0.0.1:<0.17664.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:40] [ns_1@127.0.0.1:<0.17652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17684.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17716.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:19:40] [ns_1@127.0.0.1:<0.17708.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:40] [ns_1@127.0.0.1:<0.17693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:41] [ns_1@127.0.0.1:<0.17615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:41] [ns_1@127.0.0.1:<0.17708.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:41] [ns_1@127.0.0.1:<0.17677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:42] [ns_1@127.0.0.1:<0.17666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:42] [ns_1@127.0.0.1:<0.17708.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:42] [ns_1@127.0.0.1:<0.17704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:43] [ns_1@127.0.0.1:<0.17630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:19:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17708.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:19:43] [ns_1@127.0.0.1:<0.17699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:44] [ns_1@127.0.0.1:<0.17679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:44] [ns_1@127.0.0.1:<0.17720.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:45] [ns_1@127.0.0.1:<0.17640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17716.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17750.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:19:45] [ns_1@127.0.0.1:<0.17713.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:46] [ns_1@127.0.0.1:<0.17691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:46] [ns_1@127.0.0.1:<0.17735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:47] [ns_1@127.0.0.1:<0.17659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:47] [ns_1@127.0.0.1:<0.17730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:48] [ns_1@127.0.0.1:<0.17702.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:48] [ns_1@127.0.0.1:<0.17747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:49] [ns_1@127.0.0.1:<0.17672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:19:49] [ns_1@127.0.0.1:<0.17776.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:49] [ns_1@127.0.0.1:<0.17742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:50] [ns_1@127.0.0.1:<0.17718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17750.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17784.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:19:50] [ns_1@127.0.0.1:<0.17776.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:51] [ns_1@127.0.0.1:<0.17761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:51] [ns_1@127.0.0.1:<0.17697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:51] [ns_1@127.0.0.1:<0.17776.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:52] [ns_1@127.0.0.1:<0.17757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:52] [ns_1@127.0.0.1:<0.17733.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:52] [ns_1@127.0.0.1:<0.17776.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:53] [ns_1@127.0.0.1:<0.17772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:53] [ns_1@127.0.0.1:<0.17711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:19:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17776.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:19:54] [ns_1@127.0.0.1:<0.17768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:54] [ns_1@127.0.0.1:<0.17745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:55] [ns_1@127.0.0.1:<0.17792.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:55] [ns_1@127.0.0.1:<0.17728.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:19:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17784.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:19:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17819.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:19:56] [ns_1@127.0.0.1:<0.17781.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:56] [ns_1@127.0.0.1:<0.17759.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:57] [ns_1@127.0.0.1:<0.17804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:57] [ns_1@127.0.0.1:<0.17740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:58] [ns_1@127.0.0.1:<0.17799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:58] [ns_1@127.0.0.1:<0.17770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:59] [ns_1@127.0.0.1:<0.17816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:19:59] [ns_1@127.0.0.1:<0.17753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:19:59] [ns_1@127.0.0.1:<0.17846.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:19:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753596,438360}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38337624}, {processes,10367600}, {processes_used,8742120}, {system,27970024}, {atom,1306681}, {atom_used,1284164}, {binary,368336}, {code,12859877}, {ets,2416496}]}, {system_stats, [{cpu_utilization_rate,25.75}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, 
{cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4618}, {memory_data,{4040077312,4012711936,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26732 kB\nBuffers: 61628 kB\nCached: 3529652 kB\nSwapCached: 0 kB\nActive: 309444 kB\nInactive: 3442224 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26732 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 160432 kB\nMapped: 24872 kB\nSlab: 134336 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579608 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614363648}, {buffered_memory,63107072}, {free_memory,27373568}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4613175,0}}, {context_switches,{1405730,0}}, {garbage_collection,{742638,1027007061,0}}, {io,{{input,24893906},{output,52949616}}}, {reductions,{299683333,583689}}, {run_queue,0}, {runtime,{60310,180}}]}]}] [stats:error] [2012-03-26 2:20:00] [ns_1@127.0.0.1:<0.17812.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17819.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17853.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:20:00] [ns_1@127.0.0.1:<0.17787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:00] [ns_1@127.0.0.1:<0.17846.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:01] [ns_1@127.0.0.1:<0.17831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:01] [ns_1@127.0.0.1:<0.17766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:01] [ns_1@127.0.0.1:<0.17846.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:02] 
[ns_1@127.0.0.1:<0.17826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:02] [ns_1@127.0.0.1:<0.17802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:02] [ns_1@127.0.0.1:<0.17846.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:03] [ns_1@127.0.0.1:<0.17841.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:03] [ns_1@127.0.0.1:<0.17779.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:20:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17846.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:20:04] [ns_1@127.0.0.1:<0.17837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:04] [ns_1@127.0.0.1:<0.17814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:05] [ns_1@127.0.0.1:<0.17861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17853.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17886.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:20:05] [ns_1@127.0.0.1:<0.17797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:06] [ns_1@127.0.0.1:<0.17854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:06] [ns_1@127.0.0.1:<0.17829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:07] 
[ns_1@127.0.0.1:<0.17874.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:07] [ns_1@127.0.0.1:<0.17810.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:08] [ns_1@127.0.0.1:<0.17868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:08] [ns_1@127.0.0.1:<0.17839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:08] [ns_1@127.0.0.1:<0.17857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:08] [ns_1@127.0.0.1:<0.17872.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:09] [ns_1@127.0.0.1:<0.17887.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:09] [ns_1@127.0.0.1:<0.17920.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:09] [ns_1@127.0.0.1:<0.17822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:10] [ns_1@127.0.0.1:<0.17881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17886.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17928.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:20:10] [ns_1@127.0.0.1:<0.17920.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:10] [ns_1@127.0.0.1:<0.17883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:11] [ns_1@127.0.0.1:<0.17901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:11] [ns_1@127.0.0.1:<0.17920.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:11] [ns_1@127.0.0.1:<0.17835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:12] [ns_1@127.0.0.1:<0.17897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:12] [ns_1@127.0.0.1:<0.17920.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:12] [ns_1@127.0.0.1:<0.17899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:13] [ns_1@127.0.0.1:<0.17923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:20:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17920.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:20:13] [ns_1@127.0.0.1:<0.17850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:14] [ns_1@127.0.0.1:<0.17907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:14] [ns_1@127.0.0.1:<0.17914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:15] [ns_1@127.0.0.1:<0.17940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17928.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17962.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:20:15] [ns_1@127.0.0.1:<0.17866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:16] [ns_1@127.0.0.1:<0.17909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:16] [ns_1@127.0.0.1:<0.17932.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:17] [ns_1@127.0.0.1:<0.17952.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:17] [ns_1@127.0.0.1:<0.17879.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:18] [ns_1@127.0.0.1:<0.17911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:18] [ns_1@127.0.0.1:<0.17947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:20:19] [ns_1@127.0.0.1:<0.17965.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:19] [ns_1@127.0.0.1:<0.17988.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:19] [ns_1@127.0.0.1:<0.17892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:20] [ns_1@127.0.0.1:<0.17930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17962.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.17996.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:20:20] [ns_1@127.0.0.1:<0.17988.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:20] [ns_1@127.0.0.1:<0.17959.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:21] [ns_1@127.0.0.1:<0.17978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:21] [ns_1@127.0.0.1:<0.17988.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:21] [ns_1@127.0.0.1:<0.17905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:22] [ns_1@127.0.0.1:<0.17945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:22] [ns_1@127.0.0.1:<0.17988.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:23] [ns_1@127.0.0.1:<0.17973.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:23] [ns_1@127.0.0.1:<0.17991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:20:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.17988.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:20:24] [ns_1@127.0.0.1:<0.17925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:24] [ns_1@127.0.0.1:<0.17957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:25] [ns_1@127.0.0.1:<0.17984.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.17996.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18029.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:20:29] [ns_1@127.0.0.1:<0.18004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:29] [ns_1@127.0.0.1:<0.18041.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:29] [ns_1@127.0.0.1:<0.18009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:30] [ns_1@127.0.0.1:<0.17942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18029.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18050.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:20:30] [ns_1@127.0.0.1:<0.18041.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:30] [ns_1@127.0.0.1:<0.17971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:31] [ns_1@127.0.0.1:<0.18016.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:31] [ns_1@127.0.0.1:<0.18041.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:31] [ns_1@127.0.0.1:<0.18022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:32] [ns_1@127.0.0.1:<0.17954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:32] [ns_1@127.0.0.1:<0.18041.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:32] [ns_1@127.0.0.1:<0.17982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:20:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18041.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:20:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18050.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18073.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:20:37] [ns_1@127.0.0.1:<0.18037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:37] [ns_1@127.0.0.1:<0.18044.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:38] [ns_1@127.0.0.1:<0.18047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:38] [ns_1@127.0.0.1:<0.18052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:39] [ns_1@127.0.0.1:<0.18054.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:39] [ns_1@127.0.0.1:<0.18095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:39] [ns_1@127.0.0.1:<0.18061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:20:40] [ns_1@127.0.0.1:<0.18063.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18073.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18103.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:20:40] [ns_1@127.0.0.1:<0.18095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:40] [ns_1@127.0.0.1:<0.18067.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:41] [ns_1@127.0.0.1:<0.18084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:41] [ns_1@127.0.0.1:<0.18095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:41] [ns_1@127.0.0.1:<0.18086.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:42] [ns_1@127.0.0.1:<0.17967.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:42] [ns_1@127.0.0.1:<0.18095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:42] [ns_1@127.0.0.1:<0.17999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:43] [ns_1@127.0.0.1:<0.18098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:20:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18095.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:20:43] [ns_1@127.0.0.1:<0.18100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:20:44] [ns_1@127.0.0.1:<0.17980.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:44] [ns_1@127.0.0.1:<0.18014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:45] [ns_1@127.0.0.1:<0.18115.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18103.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18137.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:20:45] [ns_1@127.0.0.1:<0.18117.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:46] [ns_1@127.0.0.1:<0.17993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:46] [ns_1@127.0.0.1:<0.18026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:47] [ns_1@127.0.0.1:<0.18127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:47] [ns_1@127.0.0.1:<0.18129.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:48] [ns_1@127.0.0.1:<0.18011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:48] [ns_1@127.0.0.1:<0.18080.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:49] [ns_1@127.0.0.1:<0.18140.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:49] [ns_1@127.0.0.1:<0.18163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:49] [ns_1@127.0.0.1:<0.18142.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:50] [ns_1@127.0.0.1:<0.18024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18137.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.18171.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:20:50] [ns_1@127.0.0.1:<0.18163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:50] [ns_1@127.0.0.1:<0.18091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:51] [ns_1@127.0.0.1:<0.18153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:51] [ns_1@127.0.0.1:<0.18163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:52] [ns_1@127.0.0.1:<0.18155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:52] [ns_1@127.0.0.1:<0.18088.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:52] [ns_1@127.0.0.1:<0.18163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:53] [ns_1@127.0.0.1:<0.18107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:20:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18163.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:20:54] [ns_1@127.0.0.1:<0.18105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:55] [ns_1@127.0.0.1:<0.18122.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:20:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18171.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:20:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18200.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:20:55] [ns_1@127.0.0.1:<0.18166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:56] [ns_1@127.0.0.1:<0.18168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:56] [ns_1@127.0.0.1:<0.18120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:57] [ns_1@127.0.0.1:<0.18134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:57] [ns_1@127.0.0.1:<0.18184.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:58] [ns_1@127.0.0.1:<0.18186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:58] [ns_1@127.0.0.1:<0.18132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:20:59] [ns_1@127.0.0.1:<0.18148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:20:59] [ns_1@127.0.0.1:<0.18240.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:20:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753656,467484}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38401936}, {processes,10395952}, {processes_used,8770472}, {system,28005984}, {atom,1306681}, {atom_used,1284164}, {binary,368336}, {code,12859877}, {ets,2445176}]}, {system_stats, [{cpu_utilization_rate,25.12562814070352}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4678}, {memory_data,{4040077312,4012830720,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26484 kB\nBuffers: 61684 kB\nCached: 3529808 kB\nSwapCached: 0 kB\nActive: 309572 kB\nInactive: 3442360 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26484 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 92 kB\nWriteback: 0 kB\nAnonPages: 160440 kB\nMapped: 24872 kB\nSlab: 134332 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579608 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614523392}, {buffered_memory,63164416}, {free_memory,27119616}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, 
{statistics, [{wall_clock,{4673204,0}}, {context_switches,{1418058,0}}, {garbage_collection,{749312,1037183429,0}}, {io,{{input,24924432},{output,53365935}}}, {reductions,{302083519,619612}}, {run_queue,0}, {runtime,{60990,160}}]}]}] [stats:error] [2012-03-26 2:20:59] [ns_1@127.0.0.1:<0.18205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:00] [ns_1@127.0.0.1:<0.18195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18200.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18249.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:21:00] [ns_1@127.0.0.1:<0.18240.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:00] [ns_1@127.0.0.1:<0.18146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:01] [ns_1@127.0.0.1:<0.18159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:01] [ns_1@127.0.0.1:<0.18240.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:01] [ns_1@127.0.0.1:<0.18218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:02] [ns_1@127.0.0.1:<0.18209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:02] [ns_1@127.0.0.1:<0.18240.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:02] [ns_1@127.0.0.1:<0.18157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:03] [ns_1@127.0.0.1:<0.18179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:21:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18240.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:21:03] [ns_1@127.0.0.1:<0.18246.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:04] [ns_1@127.0.0.1:<0.18220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:04] [ns_1@127.0.0.1:<0.18174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:05] [ns_1@127.0.0.1:<0.18203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18249.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18282.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:21:05] [ns_1@127.0.0.1:<0.18262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:06] [ns_1@127.0.0.1:<0.18251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:06] [ns_1@127.0.0.1:<0.18189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:07] [ns_1@127.0.0.1:<0.18216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:07] [ns_1@127.0.0.1:<0.18275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:08] [ns_1@127.0.0.1:<0.18266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:09] [ns_1@127.0.0.1:<0.18197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:09] [ns_1@127.0.0.1:<0.18212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:09] [ns_1@127.0.0.1:<0.18222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:09] [ns_1@127.0.0.1:<0.18244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:09] [ns_1@127.0.0.1:<0.18316.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:10] [ns_1@127.0.0.1:<0.18288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:10] [ns_1@127.0.0.1:<0.18277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18282.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18324.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:21:10] [ns_1@127.0.0.1:<0.18316.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:11] [ns_1@127.0.0.1:<0.18253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:11] [ns_1@127.0.0.1:<0.18260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:11] [ns_1@127.0.0.1:<0.18316.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:12] [ns_1@127.0.0.1:<0.18301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:12] [ns_1@127.0.0.1:<0.18293.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:12] [ns_1@127.0.0.1:<0.18316.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:21:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18316.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:21:15] [ns_1@127.0.0.1:<0.18268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18324.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18348.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:21:15] [ns_1@127.0.0.1:<0.18273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:16] [ns_1@127.0.0.1:<0.18321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:16] [ns_1@127.0.0.1:<0.18304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:17] [ns_1@127.0.0.1:<0.18279.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:17] [ns_1@127.0.0.1:<0.18286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:18] [ns_1@127.0.0.1:<0.18338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:18] [ns_1@127.0.0.1:<0.18306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:19] [ns_1@127.0.0.1:<0.18295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:19] [ns_1@127.0.0.1:<0.18374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:19] [ns_1@127.0.0.1:<0.18299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:20] [ns_1@127.0.0.1:<0.18357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18348.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18382.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:21:20] [ns_1@127.0.0.1:<0.18374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:20] [ns_1@127.0.0.1:<0.18308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:21] [ns_1@127.0.0.1:<0.18310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:21] [ns_1@127.0.0.1:<0.18374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:21] [ns_1@127.0.0.1:<0.18319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:22] [ns_1@127.0.0.1:<0.18368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:22] [ns_1@127.0.0.1:<0.18374.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:22] [ns_1@127.0.0.1:<0.18326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:23] [ns_1@127.0.0.1:<0.18330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18374.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:21:23] [ns_1@127.0.0.1:<0.18336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:24] [ns_1@127.0.0.1:<0.18385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:24] [ns_1@127.0.0.1:<0.18345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:25] [ns_1@127.0.0.1:<0.18351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18382.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18417.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:21:25] [ns_1@127.0.0.1:<0.18353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:26] [ns_1@127.0.0.1:<0.18400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:26] [ns_1@127.0.0.1:<0.18359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:21:27] [ns_1@127.0.0.1:<0.18364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:27] [ns_1@127.0.0.1:<0.18366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:28] [ns_1@127.0.0.1:<0.18412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:29] [ns_1@127.0.0.1:<0.18370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:29] [ns_1@127.0.0.1:<0.18377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:29] [ns_1@127.0.0.1:<0.18443.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:30] [ns_1@127.0.0.1:<0.18379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:30] [ns_1@127.0.0.1:<0.18427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18417.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18452.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:21:30] [ns_1@127.0.0.1:<0.18443.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:31] [ns_1@127.0.0.1:<0.18387.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:31] [ns_1@127.0.0.1:<0.18395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:31] [ns_1@127.0.0.1:<0.18443.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:32] [ns_1@127.0.0.1:<0.18397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:32] [ns_1@127.0.0.1:<0.18437.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:32] [ns_1@127.0.0.1:<0.18443.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:33] [ns_1@127.0.0.1:<0.18402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:33] [ns_1@127.0.0.1:<0.18408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 
30000]}} [error_logger:error] [2012-03-26 2:21:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18443.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:21:34] [ns_1@127.0.0.1:<0.18410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:34] [ns_1@127.0.0.1:<0.18454.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:35] [ns_1@127.0.0.1:<0.18414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:35] [ns_1@127.0.0.1:<0.18420.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18452.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:21:36] [ns_1@127.0.0.1:<0.18422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:36] [ns_1@127.0.0.1:<0.18468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:37] [ns_1@127.0.0.1:<0.18429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:37] [ns_1@127.0.0.1:<0.18433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:38] [ns_1@127.0.0.1:<0.18435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:38] [ns_1@127.0.0.1:<0.18479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:39] [ns_1@127.0.0.1:<0.18439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:39] [ns_1@127.0.0.1:<0.18447.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:39] [ns_1@127.0.0.1:<0.18512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:40] [ns_1@127.0.0.1:<0.18449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18484.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18518.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:21:40] [ns_1@127.0.0.1:<0.18495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:40] [ns_1@127.0.0.1:<0.18512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:41] [ns_1@127.0.0.1:<0.18458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:41] [ns_1@127.0.0.1:<0.18463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:41] [ns_1@127.0.0.1:<0.18512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:42] [ns_1@127.0.0.1:<0.18465.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:42] [ns_1@127.0.0.1:<0.18506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:42] [ns_1@127.0.0.1:<0.18512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:43] [ns_1@127.0.0.1:<0.18470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:43] [ns_1@127.0.0.1:<0.18475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:21:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18512.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: 
[stats:error] [2012-03-26 2:21:44] [ns_1@127.0.0.1:<0.18477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:44] [ns_1@127.0.0.1:<0.18522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:45] [ns_1@127.0.0.1:<0.18481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18518.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18552.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:21:45] [ns_1@127.0.0.1:<0.18488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:46] [ns_1@127.0.0.1:<0.18493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:46] [ns_1@127.0.0.1:<0.18537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:47] [ns_1@127.0.0.1:<0.18497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:47] [ns_1@127.0.0.1:<0.18501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:48] [ns_1@127.0.0.1:<0.18503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:48] [ns_1@127.0.0.1:<0.18549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:49] [ns_1@127.0.0.1:<0.18508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:49] [ns_1@127.0.0.1:<0.18578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:49] [ns_1@127.0.0.1:<0.18515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:50] [ns_1@127.0.0.1:<0.18519.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18552.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.18586.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:21:50] [ns_1@127.0.0.1:<0.18578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:50] [ns_1@127.0.0.1:<0.18563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:51] [ns_1@127.0.0.1:<0.18526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:51] [ns_1@127.0.0.1:<0.18578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:51] [ns_1@127.0.0.1:<0.18532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:52] [ns_1@127.0.0.1:<0.18534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:52] [ns_1@127.0.0.1:<0.18578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:52] [ns_1@127.0.0.1:<0.18574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:53] [ns_1@127.0.0.1:<0.18539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:21:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18578.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:21:53] [ns_1@127.0.0.1:<0.18544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:54] [ns_1@127.0.0.1:<0.18546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:54] [ns_1@127.0.0.1:<0.18591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:55] [ns_1@127.0.0.1:<0.18553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:21:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18586.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:21:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18621.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:21:55] [ns_1@127.0.0.1:<0.18557.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:56] [ns_1@127.0.0.1:<0.18561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:56] [ns_1@127.0.0.1:<0.18606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:57] [ns_1@127.0.0.1:<0.18567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:57] [ns_1@127.0.0.1:<0.18570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:58] [ns_1@127.0.0.1:<0.18572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:58] [ns_1@127.0.0.1:<0.18618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:21:59] [ns_1@127.0.0.1:<0.18581.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:21:59] [ns_1@127.0.0.1:<0.18648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:21:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753716,498327}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38290776}, {processes,10307216}, {processes_used,8681736}, {system,27983560}, {atom,1306681}, {atom_used,1284164}, {binary,365496}, {code,12859877}, {ets,2419344}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4738}, {memory_data,{4040077312,4012957696,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26360 kB\nBuffers: 61816 kB\nCached: 3529528 kB\nSwapCached: 0 kB\nActive: 309624 kB\nInactive: 3442144 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26360 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160448 kB\nMapped: 24872 kB\nSlab: 134352 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 
34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614236672}, {buffered_memory,63299584}, {free_memory,26992640}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4733234,0}}, {context_switches,{1431418,0}}, {garbage_collection,{756455,1048111561,0}}, {io,{{input,25199755},{output,54359539}}}, {reductions,{304961307,609360}}, {run_queue,0}, {runtime,{61730,160}}]}]}] [stats:error] [2012-03-26 2:21:59] [ns_1@127.0.0.1:<0.18583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:00] [ns_1@127.0.0.1:<0.18589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18621.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18657.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:22:00] [ns_1@127.0.0.1:<0.18648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:00] [ns_1@127.0.0.1:<0.18633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:01] [ns_1@127.0.0.1:<0.18599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:01] [ns_1@127.0.0.1:<0.18648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:02] [ns_1@127.0.0.1:<0.18601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:02] [ns_1@127.0.0.1:<0.18604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:02] [ns_1@127.0.0.1:<0.18648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:03] [ns_1@127.0.0.1:<0.18643.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:03] [ns_1@127.0.0.1:<0.18612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:22:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: 
ns_janitor:cleanup/2 pid: <0.18648.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:22:04] [ns_1@127.0.0.1:<0.18614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:04] [ns_1@127.0.0.1:<0.18616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:05] [ns_1@127.0.0.1:<0.18661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:05] [ns_1@127.0.0.1:<0.18624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18657.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18690.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:06] [ns_1@127.0.0.1:<0.18626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:06] [ns_1@127.0.0.1:<0.18631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:07] [ns_1@127.0.0.1:<0.18676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:07] [ns_1@127.0.0.1:<0.18637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:08] [ns_1@127.0.0.1:<0.18639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:08] [ns_1@127.0.0.1:<0.18641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:09] [ns_1@127.0.0.1:<0.18687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:09] [ns_1@127.0.0.1:<0.18703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:09] [ns_1@127.0.0.1:<0.18652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:09] [ns_1@127.0.0.1:<0.18668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:09] 
[ns_1@127.0.0.1:<0.18681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:09] [ns_1@127.0.0.1:<0.18694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:09] [ns_1@127.0.0.1:<0.18728.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:10] [ns_1@127.0.0.1:<0.18654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:10] [ns_1@127.0.0.1:<0.18728.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18690.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:10] [ns_1@127.0.0.1:<0.18659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 2:22:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18734.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:22:11] [ns_1@127.0.0.1:<0.18728.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:12] [ns_1@127.0.0.1:<0.18707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:12] [ns_1@127.0.0.1:<0.18683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:12] [ns_1@127.0.0.1:<0.18728.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:13] [ns_1@127.0.0.1:<0.18685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:13] [ns_1@127.0.0.1:<0.18722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:22:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18728.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] 
trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:22:14] [ns_1@127.0.0.1:<0.18731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:14] [ns_1@127.0.0.1:<0.18699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:15] [ns_1@127.0.0.1:<0.18701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:15] [ns_1@127.0.0.1:<0.18744.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18734.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18766.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:16] [ns_1@127.0.0.1:<0.18670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:16] [ns_1@127.0.0.1:<0.18709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:17] [ns_1@127.0.0.1:<0.18712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:17] [ns_1@127.0.0.1:<0.18756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:18] [ns_1@127.0.0.1:<0.18746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:18] [ns_1@127.0.0.1:<0.18735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:19] [ns_1@127.0.0.1:<0.18720.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:19] [ns_1@127.0.0.1:<0.18769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:19] [ns_1@127.0.0.1:<0.18792.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:20] [ns_1@127.0.0.1:<0.18758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18766.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18798.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:20] [ns_1@127.0.0.1:<0.18674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:20] [ns_1@127.0.0.1:<0.18792.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:21] [ns_1@127.0.0.1:<0.18751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:21] [ns_1@127.0.0.1:<0.18782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:21] [ns_1@127.0.0.1:<0.18792.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:22] [ns_1@127.0.0.1:<0.18773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:22] [ns_1@127.0.0.1:<0.18749.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:22] [ns_1@127.0.0.1:<0.18792.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:23] [ns_1@127.0.0.1:<0.18763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:23] [ns_1@127.0.0.1:<0.18795.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:22:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18792.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:22:24] [ns_1@127.0.0.1:<0.18784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:24] [ns_1@127.0.0.1:<0.18761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:25] [ns_1@127.0.0.1:<0.18777.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} 
Offender: [{pid,<0.18798.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18833.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:25] [ns_1@127.0.0.1:<0.18813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:26] [ns_1@127.0.0.1:<0.18799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:26] [ns_1@127.0.0.1:<0.18775.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:27] [ns_1@127.0.0.1:<0.18788.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:27] [ns_1@127.0.0.1:<0.18826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:28] [ns_1@127.0.0.1:<0.18815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:28] [ns_1@127.0.0.1:<0.18714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:29] [ns_1@127.0.0.1:<0.18808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:29] [ns_1@127.0.0.1:<0.18859.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:29] [ns_1@127.0.0.1:<0.18838.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:30] [ns_1@127.0.0.1:<0.18828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18833.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18868.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:22:30] [ns_1@127.0.0.1:<0.18859.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:30] [ns_1@127.0.0.1:<0.18716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:31] [ns_1@127.0.0.1:<0.18820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:31] [ns_1@127.0.0.1:<0.18859.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] 
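Both recurring failures in the reports above reduce to the same condition: a gen_server:call/3 to the registered name 'ns_memcached-default' (the ns_memcached process for the "default" bucket) while no process is registered under that name. The hot_keys_keeper child dies on the topkeys call and each ns_janitor cleanup run dies on list_vbuckets_prevstate, both with a {noproc,...} exit reason. The snippet below is an illustrative Erlang shell session only, not ns_server code; it simply reproduces the exact exit reason recorded in the SUPERVISOR and CRASH reports, assuming it is run on a node where 'ns_memcached-default' is not registered.

    %% Illustrative only: gen_server:call/3 to a registered name that is not
    %% alive exits the caller immediately with
    %% {noproc, {gen_server, call, [Name, Request, Timeout]}}.
    1> whereis('ns_memcached-default').
    undefined
    2> catch gen_server:call('ns_memcached-default', topkeys, 30000).
    {'EXIT',{noproc,{gen_server,call,['ns_memcached-default',topkeys,30000]}}}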
[stats:error] [2012-03-26 2:22:31] [ns_1@127.0.0.1:<0.18851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:32] [ns_1@127.0.0.1:<0.18842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:32] [ns_1@127.0.0.1:<0.18859.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:32] [ns_1@127.0.0.1:<0.18718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:33] [ns_1@127.0.0.1:<0.18834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:22:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18859.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:22:33] [ns_1@127.0.0.1:<0.18865.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:34] [ns_1@127.0.0.1:<0.18853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:34] [ns_1@127.0.0.1:<0.18786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:35] [ns_1@127.0.0.1:<0.18847.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18868.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18901.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:35] [ns_1@127.0.0.1:<0.18881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:36] [ns_1@127.0.0.1:<0.18870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:22:36] [ns_1@127.0.0.1:<0.18803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:37] [ns_1@127.0.0.1:<0.18862.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:37] [ns_1@127.0.0.1:<0.18894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:38] [ns_1@127.0.0.1:<0.18885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:38] [ns_1@127.0.0.1:<0.18818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:39] [ns_1@127.0.0.1:<0.18879.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:39] [ns_1@127.0.0.1:<0.18929.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:39] [ns_1@127.0.0.1:<0.18907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:40] [ns_1@127.0.0.1:<0.18896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18901.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18937.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:22:40] [ns_1@127.0.0.1:<0.18929.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:40] [ns_1@127.0.0.1:<0.18830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:41] [ns_1@127.0.0.1:<0.18892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:41] [ns_1@127.0.0.1:<0.18929.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:41] [ns_1@127.0.0.1:<0.18920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:42] [ns_1@127.0.0.1:<0.18912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:42] [ns_1@127.0.0.1:<0.18929.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:42] [ns_1@127.0.0.1:<0.18845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:43] [ns_1@127.0.0.1:<0.18905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited 
for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:22:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18929.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:22:43] [ns_1@127.0.0.1:<0.18934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:44] [ns_1@127.0.0.1:<0.18923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:45] [ns_1@127.0.0.1:<0.18855.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:45] [ns_1@127.0.0.1:<0.18918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18937.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.18971.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:46] [ns_1@127.0.0.1:<0.18951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:46] [ns_1@127.0.0.1:<0.18939.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:47] [ns_1@127.0.0.1:<0.18872.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:47] [ns_1@127.0.0.1:<0.18932.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:48] [ns_1@127.0.0.1:<0.18963.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:48] [ns_1@127.0.0.1:<0.18954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:49] [ns_1@127.0.0.1:<0.18887.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:49] [ns_1@127.0.0.1:<0.18949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:49] [ns_1@127.0.0.1:<0.18997.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:50] [ns_1@127.0.0.1:<0.18978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:50] [ns_1@127.0.0.1:<0.18966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.18971.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19005.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:22:50] [ns_1@127.0.0.1:<0.18997.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:51] [ns_1@127.0.0.1:<0.18898.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:51] [ns_1@127.0.0.1:<0.18961.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:51] [ns_1@127.0.0.1:<0.18997.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:52] [ns_1@127.0.0.1:<0.18989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:52] [ns_1@127.0.0.1:<0.18980.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:52] [ns_1@127.0.0.1:<0.18997.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:53] [ns_1@127.0.0.1:<0.18914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:53] [ns_1@127.0.0.1:<0.18974.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:22:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.18997.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] 
dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:22:54] [ns_1@127.0.0.1:<0.19002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:54] [ns_1@127.0.0.1:<0.18991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:55] [ns_1@127.0.0.1:<0.18925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:55] [ns_1@127.0.0.1:<0.18987.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:22:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19005.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:22:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19040.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:22:56] [ns_1@127.0.0.1:<0.19020.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:56] [ns_1@127.0.0.1:<0.19008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:57] [ns_1@127.0.0.1:<0.18941.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:57] [ns_1@127.0.0.1:<0.19000.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:58] [ns_1@127.0.0.1:<0.19033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:58] [ns_1@127.0.0.1:<0.19023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:59] [ns_1@127.0.0.1:<0.18956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:22:59] [ns_1@127.0.0.1:<0.19018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:22:59] [ns_1@127.0.0.1:<0.19067.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:22:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753776,528761}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38300816}, {processes,10278648}, {processes_used,8653168}, {system,28022168}, {atom,1306681}, {atom_used,1284164}, {binary,369080}, {code,12859877}, {ets,2447856}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, 
{cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4798}, {memory_data,{4040077312,4013084672,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26352 kB\nBuffers: 61904 kB\nCached: 3529684 kB\nSwapCached: 0 kB\nActive: 309732 kB\nInactive: 3442316 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26352 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 160456 kB\nMapped: 24872 kB\nSlab: 134328 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614396416}, {buffered_memory,63389696}, {free_memory,26984448}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4793263,0}}, {context_switches,{1444448,0}}, {garbage_collection,{763762,1059389040,0}}, {io,{{input,25230353},{output,54802494}}}, {reductions,{307575359,629239}}, {run_queue,0}, {runtime,{62320,130}}]}]}] [stats:error] [2012-03-26 2:23:00] [ns_1@127.0.0.1:<0.19047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19040.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19074.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:23:00] [ns_1@127.0.0.1:<0.19035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:00] [ns_1@127.0.0.1:<0.19067.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:01] [ns_1@127.0.0.1:<0.18968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:01] [ns_1@127.0.0.1:<0.19031.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:01] [ns_1@127.0.0.1:<0.19067.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:02] 
[ns_1@127.0.0.1:<0.19058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:02] [ns_1@127.0.0.1:<0.19050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:02] [ns_1@127.0.0.1:<0.19067.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:03] [ns_1@127.0.0.1:<0.18982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:03] [ns_1@127.0.0.1:<0.19043.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:23:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19067.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:23:04] [ns_1@127.0.0.1:<0.19075.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:04] [ns_1@127.0.0.1:<0.19060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:05] [ns_1@127.0.0.1:<0.18993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19074.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19107.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:23:05] [ns_1@127.0.0.1:<0.19056.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:06] [ns_1@127.0.0.1:<0.19089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:06] [ns_1@127.0.0.1:<0.19078.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:07] 
[ns_1@127.0.0.1:<0.19013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:07] [ns_1@127.0.0.1:<0.19071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:08] [ns_1@127.0.0.1:<0.19102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:08] [ns_1@127.0.0.1:<0.19093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:09] [ns_1@127.0.0.1:<0.19025.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:09] [ns_1@127.0.0.1:<0.19062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:09] [ns_1@127.0.0.1:<0.19082.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:09] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:23:09] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:23:09] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:23:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:23:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
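Every hot_keys_keeper SUPERVISOR REPORT (child_terminated) under menelaus_sup above is immediately followed by a PROGRESS REPORT with a fresh pid, which is the normal behaviour of a permanent worker being restarted by its supervisor. The module below is a minimal sketch, not the actual menelaus_sup source: the child spec fields are copied from the reports ({mfargs,{hot_keys_keeper,start_link,[]}}, restart_type permanent, shutdown 5000, child_type worker), while the module name, the one_for_one strategy and the 10-restarts-in-10-seconds intensity are assumptions for illustration.

    %% Minimal sketch of a supervisor with one permanent worker; module name,
    %% strategy and restart intensity are assumed, child spec fields are taken
    %% from the SUPERVISOR/PROGRESS reports in this log.
    -module(menelaus_sup_sketch).
    -behaviour(supervisor).
    -export([start_link/0, init/1]).

    start_link() ->
        supervisor:start_link({local, menelaus_sup_sketch}, ?MODULE, []).

    init([]) ->
        {ok, {{one_for_one, 10, 10},               % assumed strategy/intensity
              [{hot_keys_keeper,                   % {name,hot_keys_keeper}
                {hot_keys_keeper, start_link, []}, % {mfargs,...}
                permanent,                         % {restart_type,permanent}
                5000,                              % {shutdown,5000} ms
                worker,                            % {child_type,worker}
                [hot_keys_keeper]}]}}.

A permanent child is restarted regardless of its exit reason, so for as long as 'ns_memcached-default' stays down the terminate/restart pair repeats, here roughly every five seconds per the timestamps above.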
[ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.19095.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.19120.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.19171.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.19131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:23:16: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] 
[2012-03-26 2:23:14] [ns_1@127.0.0.1:<0.19133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:15] [ns_1@127.0.0.1:<0.19171.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:15] [ns_1@127.0.0.1:<0.19110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19107.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19188.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:23:15] [ns_1@127.0.0.1:<0.19087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:16] [ns_1@127.0.0.1:<0.19171.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:16] [ns_1@127.0.0.1:<0.19182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:16] [ns_1@127.0.0.1:<0.19136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:17] [ns_1@127.0.0.1:<0.19171.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:17] [ns_1@127.0.0.1:<0.19124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:18] [ns_1@127.0.0.1:<0.19100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:18] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:23:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19171.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:23:18] [ns_1@127.0.0.1:<0.19198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:19] 
[ns_1@127.0.0.1:<0.19139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:19] [ns_1@127.0.0.1:<0.19191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:19] [ns_1@127.0.0.1:<0.19217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:20] [ns_1@127.0.0.1:<0.19113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:20] [ns_1@127.0.0.1:<0.19211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19188.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19225.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:23:20] [ns_1@127.0.0.1:<0.19217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:21] [ns_1@127.0.0.1:<0.19141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:21] [ns_1@127.0.0.1:<0.19206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:21] [ns_1@127.0.0.1:<0.19217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:22] [ns_1@127.0.0.1:<0.19126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:22] [ns_1@127.0.0.1:<0.19228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:22] [ns_1@127.0.0.1:<0.19217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:23] [ns_1@127.0.0.1:<0.19143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:23] [ns_1@127.0.0.1:<0.19220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:23:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19217.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:23:24] [ns_1@127.0.0.1:<0.19164.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:24] [ns_1@127.0.0.1:<0.19243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:25] [ns_1@127.0.0.1:<0.19185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:25] [ns_1@127.0.0.1:<0.19238.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19225.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19260.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:23:26] [ns_1@127.0.0.1:<0.19165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:26] [ns_1@127.0.0.1:<0.19255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:27] [ns_1@127.0.0.1:<0.19201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:27] [ns_1@127.0.0.1:<0.19251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:28] [ns_1@127.0.0.1:<0.19166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:28] [ns_1@127.0.0.1:<0.19270.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:29] [ns_1@127.0.0.1:<0.19213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:29] [ns_1@127.0.0.1:<0.19263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:29] [ns_1@127.0.0.1:<0.19286.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:30] [ns_1@127.0.0.1:<0.19167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19260.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19293.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:23:30] [ns_1@127.0.0.1:<0.19280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:30] [ns_1@127.0.0.1:<0.19286.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:31] [ns_1@127.0.0.1:<0.19233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:31] [ns_1@127.0.0.1:<0.19276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:31] [ns_1@127.0.0.1:<0.19286.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:32] [ns_1@127.0.0.1:<0.19175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:32] [ns_1@127.0.0.1:<0.19297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:32] [ns_1@127.0.0.1:<0.19286.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:33] [ns_1@127.0.0.1:<0.19245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:33] [ns_1@127.0.0.1:<0.19290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:23:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19286.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:23:34] [ns_1@127.0.0.1:<0.19194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:34] [ns_1@127.0.0.1:<0.19312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:35] [ns_1@127.0.0.1:<0.19257.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19293.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19326.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:23:35] [ns_1@127.0.0.1:<0.19306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:36] [ns_1@127.0.0.1:<0.19209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:36] [ns_1@127.0.0.1:<0.19323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:37] [ns_1@127.0.0.1:<0.19272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:37] [ns_1@127.0.0.1:<0.19319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:38] [ns_1@127.0.0.1:<0.19222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:38] [ns_1@127.0.0.1:<0.19339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:39] [ns_1@127.0.0.1:<0.19282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:39] [ns_1@127.0.0.1:<0.19354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:39] [ns_1@127.0.0.1:<0.19332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:40] [ns_1@127.0.0.1:<0.19240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19326.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19362.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:23:40] [ns_1@127.0.0.1:<0.19354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:40] [ns_1@127.0.0.1:<0.19350.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:41] [ns_1@127.0.0.1:<0.19301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:41] [ns_1@127.0.0.1:<0.19354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:41] [ns_1@127.0.0.1:<0.19345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:42] [ns_1@127.0.0.1:<0.19253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:42] [ns_1@127.0.0.1:<0.19354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:42] [ns_1@127.0.0.1:<0.19366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:43] [ns_1@127.0.0.1:<0.19314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:23:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19354.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:23:43] [ns_1@127.0.0.1:<0.19359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:44] [ns_1@127.0.0.1:<0.19267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:44] [ns_1@127.0.0.1:<0.19381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:45] [ns_1@127.0.0.1:<0.19327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19362.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19396.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 2:23:45] [ns_1@127.0.0.1:<0.19376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:46] [ns_1@127.0.0.1:<0.19278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:46] [ns_1@127.0.0.1:<0.19393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:47] [ns_1@127.0.0.1:<0.19343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:47] [ns_1@127.0.0.1:<0.19388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:48] [ns_1@127.0.0.1:<0.19294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:48] [ns_1@127.0.0.1:<0.19407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:49] [ns_1@127.0.0.1:<0.19357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:49] [ns_1@127.0.0.1:<0.19422.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:49] [ns_1@127.0.0.1:<0.19401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:50] [ns_1@127.0.0.1:<0.19308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19396.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19430.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:23:50] [ns_1@127.0.0.1:<0.19422.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:50] [ns_1@127.0.0.1:<0.19418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:51] [ns_1@127.0.0.1:<0.19374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:51] [ns_1@127.0.0.1:<0.19422.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:51] [ns_1@127.0.0.1:<0.19414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:52] [ns_1@127.0.0.1:<0.19321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:52] [ns_1@127.0.0.1:<0.19422.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:53] 
[ns_1@127.0.0.1:<0.19435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:53] [ns_1@127.0.0.1:<0.19386.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:23:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19422.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:23:54] [ns_1@127.0.0.1:<0.19427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:54] [ns_1@127.0.0.1:<0.19337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:55] [ns_1@127.0.0.1:<0.19450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:55] [ns_1@127.0.0.1:<0.19399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:23:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19430.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:23:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19465.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:23:56] [ns_1@127.0.0.1:<0.19445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:56] [ns_1@127.0.0.1:<0.19347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:57] [ns_1@127.0.0.1:<0.19462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:57] [ns_1@127.0.0.1:<0.19412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:58] [ns_1@127.0.0.1:<0.19458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:58] 
[ns_1@127.0.0.1:<0.19364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:59] [ns_1@127.0.0.1:<0.19477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:23:59] [ns_1@127.0.0.1:<0.19425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:23:59] [ns_1@127.0.0.1:<0.19507.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:23:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753836,556326}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38208896}, {processes,10181760}, {processes_used,8556280}, {system,28027136}, {atom,1306681}, {atom_used,1284164}, {binary,395792}, {code,12859877}, {ets,2419192}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4858}, {memory_data,{4040077312,4013211648,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25740 kB\nBuffers: 61992 kB\nCached: 3529844 kB\nSwapCached: 0 kB\nActive: 309864 kB\nInactive: 3442436 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25740 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 92 kB\nWriteback: 0 kB\nAnonPages: 160472 kB\nMapped: 24872 kB\nSlab: 134336 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614560256}, {buffered_memory,63479808}, {free_memory,26357760}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4853292,0}}, {context_switches,{1457720,0}}, {garbage_collection,{770979,1071079158,0}}, {io,{{input,25266824},{output,55291077}}}, {reductions,{310216030,642436}}, {run_queue,0}, {runtime,{62970,150}}]}]}] [stats:error] [2012-03-26 2:24:00] [ns_1@127.0.0.1:<0.19472.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:00] [ns_1@127.0.0.1:<0.19379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19465.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19516.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:24:00] [ns_1@127.0.0.1:<0.19507.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:01] [ns_1@127.0.0.1:<0.19496.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:01] [ns_1@127.0.0.1:<0.19443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:01] [ns_1@127.0.0.1:<0.19507.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:02] [ns_1@127.0.0.1:<0.19483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:02] [ns_1@127.0.0.1:<0.19391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:02] [ns_1@127.0.0.1:<0.19507.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:03] [ns_1@127.0.0.1:<0.19522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:03] [ns_1@127.0.0.1:<0.19456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:24:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19507.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:24:04] [ns_1@127.0.0.1:<0.19513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:04] [ns_1@127.0.0.1:<0.19405.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:05] [ns_1@127.0.0.1:<0.19535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19516.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19547.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:24:05] [ns_1@127.0.0.1:<0.19468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:06] [ns_1@127.0.0.1:<0.19529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:06] [ns_1@127.0.0.1:<0.19416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:07] [ns_1@127.0.0.1:<0.19548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:07] [ns_1@127.0.0.1:<0.19481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:08] [ns_1@127.0.0.1:<0.19542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:08] [ns_1@127.0.0.1:<0.19433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:09] [ns_1@127.0.0.1:<0.19562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:09] [ns_1@127.0.0.1:<0.19511.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:09] [ns_1@127.0.0.1:<0.19579.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:10] [ns_1@127.0.0.1:<0.19558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19547.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19585.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:24:10] [ns_1@127.0.0.1:<0.19448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:10] [ns_1@127.0.0.1:<0.19579.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:11] [ns_1@127.0.0.1:<0.19573.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:11] [ns_1@127.0.0.1:<0.19579.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:11] [ns_1@127.0.0.1:<0.19527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:12] [ns_1@127.0.0.1:<0.19568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:12] [ns_1@127.0.0.1:<0.19579.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:12] [ns_1@127.0.0.1:<0.19460.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:13] [ns_1@127.0.0.1:<0.19595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:24:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19579.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:24:13] [ns_1@127.0.0.1:<0.19540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:14] [ns_1@127.0.0.1:<0.19586.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:14] [ns_1@127.0.0.1:<0.19475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19585.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19617.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:24:19] [ns_1@127.0.0.1:<0.19627.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19617.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19631.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:24:20] [ns_1@127.0.0.1:<0.19627.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:21] [ns_1@127.0.0.1:<0.19627.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:22] [ns_1@127.0.0.1:<0.19627.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:24:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19627.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:24:24] [ns_1@127.0.0.1:<0.19485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:25] [ns_1@127.0.0.1:<0.19609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19631.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19650.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:24:25] [ns_1@127.0.0.1:<0.19553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:24:26] [ns_1@127.0.0.1:<0.19602.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:26] [ns_1@127.0.0.1:<0.19518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:27] [ns_1@127.0.0.1:<0.19653.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:27] [ns_1@127.0.0.1:<0.19566.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:28] [ns_1@127.0.0.1:<0.19614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:28] [ns_1@127.0.0.1:<0.19533.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:29] [ns_1@127.0.0.1:<0.19666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:29] [ns_1@127.0.0.1:<0.19676.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:29] [ns_1@127.0.0.1:<0.19582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:30] [ns_1@127.0.0.1:<0.19645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19650.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19685.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:24:30] [ns_1@127.0.0.1:<0.19676.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:30] [ns_1@127.0.0.1:<0.19544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:31] [ns_1@127.0.0.1:<0.19680.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:31] [ns_1@127.0.0.1:<0.19676.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:31] [ns_1@127.0.0.1:<0.19599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:32] [ns_1@127.0.0.1:<0.19660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:32] [ns_1@127.0.0.1:<0.19676.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:32] [ns_1@127.0.0.1:<0.19560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:33] [ns_1@127.0.0.1:<0.19696.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:24:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19676.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:24:33] [ns_1@127.0.0.1:<0.19611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:34] [ns_1@127.0.0.1:<0.19670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:34] [ns_1@127.0.0.1:<0.19571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:35] [ns_1@127.0.0.1:<0.19709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19685.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19718.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:24:36] [ns_1@127.0.0.1:<0.19655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:36] [ns_1@127.0.0.1:<0.19687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:37] [ns_1@127.0.0.1:<0.19589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:37] [ns_1@127.0.0.1:<0.19722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:38] [ns_1@127.0.0.1:<0.19668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:38] [ns_1@127.0.0.1:<0.19702.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:39] [ns_1@127.0.0.1:<0.19604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:39] [ns_1@127.0.0.1:<0.19735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:39] [ns_1@127.0.0.1:<0.19746.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:40] [ns_1@127.0.0.1:<0.19682.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:40] [ns_1@127.0.0.1:<0.19713.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19718.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19754.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:24:40] [ns_1@127.0.0.1:<0.19746.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:41] [ns_1@127.0.0.1:<0.19647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:41] [ns_1@127.0.0.1:<0.19749.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:41] [ns_1@127.0.0.1:<0.19746.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:42] [ns_1@127.0.0.1:<0.19698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:42] [ns_1@127.0.0.1:<0.19729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:42] [ns_1@127.0.0.1:<0.19746.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:43] [ns_1@127.0.0.1:<0.19662.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:43] [ns_1@127.0.0.1:<0.19766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:24:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19746.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:24:44] [ns_1@127.0.0.1:<0.19711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:44] [ns_1@127.0.0.1:<0.19740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:45] [ns_1@127.0.0.1:<0.19672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:45] [ns_1@127.0.0.1:<0.19778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19754.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19788.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:24:46] [ns_1@127.0.0.1:<0.19724.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:46] [ns_1@127.0.0.1:<0.19756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:47] [ns_1@127.0.0.1:<0.19689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:47] [ns_1@127.0.0.1:<0.19791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:48] [ns_1@127.0.0.1:<0.19737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:48] [ns_1@127.0.0.1:<0.19771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:49] [ns_1@127.0.0.1:<0.19704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:49] [ns_1@127.0.0.1:<0.19804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:49] [ns_1@127.0.0.1:<0.19814.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:50] [ns_1@127.0.0.1:<0.19751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19788.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19820.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:24:50] [ns_1@127.0.0.1:<0.19783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:50] [ns_1@127.0.0.1:<0.19814.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:51] [ns_1@127.0.0.1:<0.19715.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:51] [ns_1@127.0.0.1:<0.19817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:51] [ns_1@127.0.0.1:<0.19814.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:52] [ns_1@127.0.0.1:<0.19768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:52] [ns_1@127.0.0.1:<0.19797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:52] [ns_1@127.0.0.1:<0.19814.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:53] [ns_1@127.0.0.1:<0.19731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:53] [ns_1@127.0.0.1:<0.19835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:24:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19814.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:24:54] [ns_1@127.0.0.1:<0.19780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:54] [ns_1@127.0.0.1:<0.19808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:55] [ns_1@127.0.0.1:<0.19742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:24:55] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19820.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:24:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19855.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:24:55] [ns_1@127.0.0.1:<0.19848.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:56] [ns_1@127.0.0.1:<0.19795.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:56] [ns_1@127.0.0.1:<0.19825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:57] [ns_1@127.0.0.1:<0.19760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:57] [ns_1@127.0.0.1:<0.19860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:58] [ns_1@127.0.0.1:<0.19806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:58] [ns_1@127.0.0.1:<0.19840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:24:59] [ns_1@127.0.0.1:<0.19773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:24:59] [ns_1@127.0.0.1:<0.19882.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:24:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,753896,584371}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38039136}, {processes,9975056}, {processes_used,8349576}, {system,28064080}, {atom,1306681}, {atom_used,1284164}, {binary,396480}, {code,12859877}, {ets,2450288}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4918}, {memory_data,{4040077312,4013719552,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25244 kB\nBuffers: 62108 kB\nCached: 3529956 kB\nSwapCached: 0 kB\nActive: 309892 kB\nInactive: 3442616 kB\nHighTotal: 0 
kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25244 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 88 kB\nWriteback: 0 kB\nAnonPages: 160480 kB\nMapped: 24872 kB\nSlab: 134332 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614674944}, {buffered_memory,63598592}, {free_memory,25849856}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4913321,0}}, {context_switches,{1470129,0}}, {garbage_collection,{777657,1080889555,0}}, {io,{{input,25552143},{output,55958656}}}, {reductions,{312582413,611741}}, {run_queue,0}, {runtime,{63570,140}}]}]}] [stats:error] [2012-03-26 2:24:59] [ns_1@127.0.0.1:<0.19873.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:00] [ns_1@127.0.0.1:<0.19821.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19855.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19891.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:25:00] [ns_1@127.0.0.1:<0.19882.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:00] [ns_1@127.0.0.1:<0.19852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:01] [ns_1@127.0.0.1:<0.19785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:01] [ns_1@127.0.0.1:<0.19882.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:01] [ns_1@127.0.0.1:<0.19888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:02] [ns_1@127.0.0.1:<0.19837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:02] [ns_1@127.0.0.1:<0.19882.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:02] [ns_1@127.0.0.1:<0.19867.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:03] [ns_1@127.0.0.1:<0.19799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:03] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:25:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19882.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:25:03] [ns_1@127.0.0.1:<0.19904.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:04] [ns_1@127.0.0.1:<0.19850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:04] [ns_1@127.0.0.1:<0.19877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:05] [ns_1@127.0.0.1:<0.19810.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19891.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19924.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:25:05] [ns_1@127.0.0.1:<0.19917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:06] [ns_1@127.0.0.1:<0.19864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:06] [ns_1@127.0.0.1:<0.19895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:07] [ns_1@127.0.0.1:<0.19830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:07] [ns_1@127.0.0.1:<0.19930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:08] [ns_1@127.0.0.1:<0.19875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:08] [ns_1@127.0.0.1:<0.19910.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:09] 
[ns_1@127.0.0.1:<0.19842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:09] [ns_1@127.0.0.1:<0.19954.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:09] [ns_1@127.0.0.1:<0.19943.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:10] [ns_1@127.0.0.1:<0.19893.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19924.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19962.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:25:10] [ns_1@127.0.0.1:<0.19954.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:11] [ns_1@127.0.0.1:<0.19921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:11] [ns_1@127.0.0.1:<0.19858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:11] [ns_1@127.0.0.1:<0.19954.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:12] [ns_1@127.0.0.1:<0.19959.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:12] [ns_1@127.0.0.1:<0.19908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:12] [ns_1@127.0.0.1:<0.19954.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:13] [ns_1@127.0.0.1:<0.19937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:13] [ns_1@127.0.0.1:<0.19871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:25:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.19954.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:25:14] [ns_1@127.0.0.1:<0.19976.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:14] [ns_1@127.0.0.1:<0.19919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:15] [ns_1@127.0.0.1:<0.19948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:15] [ns_1@127.0.0.1:<0.19886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19962.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.19996.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:25:16] [ns_1@127.0.0.1:<0.19988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:16] [ns_1@127.0.0.1:<0.19935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:17] [ns_1@127.0.0.1:<0.19966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:17] [ns_1@127.0.0.1:<0.19902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:18] [ns_1@127.0.0.1:<0.20001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:18] [ns_1@127.0.0.1:<0.19946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:19] [ns_1@127.0.0.1:<0.19981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:19] [ns_1@127.0.0.1:<0.19915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:19] [ns_1@127.0.0.1:<0.20022.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:20] [ns_1@127.0.0.1:<0.20014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:20] [ns_1@127.0.0.1:<0.19964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.19996.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20030.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:25:20] [ns_1@127.0.0.1:<0.20022.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:21] [ns_1@127.0.0.1:<0.19993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:21] [ns_1@127.0.0.1:<0.19928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:21] [ns_1@127.0.0.1:<0.20022.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:22] [ns_1@127.0.0.1:<0.20027.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:22] [ns_1@127.0.0.1:<0.19979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:22] [ns_1@127.0.0.1:<0.20022.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:23] [ns_1@127.0.0.1:<0.20007.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:23] [ns_1@127.0.0.1:<0.19941.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:25:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20022.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:25:24] [ns_1@127.0.0.1:<0.20045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:24] [ns_1@127.0.0.1:<0.19991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:25] [ns_1@127.0.0.1:<0.20018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:25] [ns_1@127.0.0.1:<0.20038.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:25:25] [ns_1@127.0.0.1:<0.20050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20030.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20067.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:25:25] [ns_1@127.0.0.1:<0.19957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:26] [ns_1@127.0.0.1:<0.20058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:26] [ns_1@127.0.0.1:<0.20005.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:27] [ns_1@127.0.0.1:<0.20068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:27] [ns_1@127.0.0.1:<0.19974.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:28] [ns_1@127.0.0.1:<0.20076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:28] [ns_1@127.0.0.1:<0.20016.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:29] [ns_1@127.0.0.1:<0.20081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:29] [ns_1@127.0.0.1:<0.19986.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:29] [ns_1@127.0.0.1:<0.20095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:30] [ns_1@127.0.0.1:<0.20087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20067.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20102.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:25:30] [ns_1@127.0.0.1:<0.20095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:30] [ns_1@127.0.0.1:<0.20033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:31] [ns_1@127.0.0.1:<0.20091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:31] [ns_1@127.0.0.1:<0.20095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:31] [ns_1@127.0.0.1:<0.19999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:32] [ns_1@127.0.0.1:<0.20104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:32] [ns_1@127.0.0.1:<0.20095.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:32] [ns_1@127.0.0.1:<0.20048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:33] [ns_1@127.0.0.1:<0.20113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:25:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20095.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:25:33] [ns_1@127.0.0.1:<0.20012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:34] [ns_1@127.0.0.1:<0.20118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:34] [ns_1@127.0.0.1:<0.20060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:35] [ns_1@127.0.0.1:<0.20126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20102.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.20135.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:25:35] [ns_1@127.0.0.1:<0.20025.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:36] [ns_1@127.0.0.1:<0.20130.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:36] [ns_1@127.0.0.1:<0.20062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:37] [ns_1@127.0.0.1:<0.20139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:37] [ns_1@127.0.0.1:<0.20043.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:38] [ns_1@127.0.0.1:<0.20146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:38] [ns_1@127.0.0.1:<0.20064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:39] [ns_1@127.0.0.1:<0.20152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:39] [ns_1@127.0.0.1:<0.20163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:39] [ns_1@127.0.0.1:<0.20056.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:40] [ns_1@127.0.0.1:<0.20157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20135.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20171.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:25:40] [ns_1@127.0.0.1:<0.20163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:40] [ns_1@127.0.0.1:<0.20079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:41] [ns_1@127.0.0.1:<0.20166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:41] [ns_1@127.0.0.1:<0.20163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:41] [ns_1@127.0.0.1:<0.20072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:42] [ns_1@127.0.0.1:<0.20173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:42] 
[ns_1@127.0.0.1:<0.20163.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:42] [ns_1@127.0.0.1:<0.20089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:43] [ns_1@127.0.0.1:<0.20183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:25:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20163.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:25:43] [ns_1@127.0.0.1:<0.20085.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:44] [ns_1@127.0.0.1:<0.20188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:44] [ns_1@127.0.0.1:<0.20106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:45] [ns_1@127.0.0.1:<0.20195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20171.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20205.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:25:46] [ns_1@127.0.0.1:<0.20099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:46] [ns_1@127.0.0.1:<0.20200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:47] [ns_1@127.0.0.1:<0.20121.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:47] [ns_1@127.0.0.1:<0.20208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:48] 
[ns_1@127.0.0.1:<0.20115.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:48] [ns_1@127.0.0.1:<0.20214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:49] [ns_1@127.0.0.1:<0.20132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:49] [ns_1@127.0.0.1:<0.20221.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:49] [ns_1@127.0.0.1:<0.20231.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:50] [ns_1@127.0.0.1:<0.20128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:50] [ns_1@127.0.0.1:<0.20225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20205.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20239.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:25:50] [ns_1@127.0.0.1:<0.20231.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:51] [ns_1@127.0.0.1:<0.20148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:51] [ns_1@127.0.0.1:<0.20234.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:51] [ns_1@127.0.0.1:<0.20231.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:52] [ns_1@127.0.0.1:<0.20141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:52] [ns_1@127.0.0.1:<0.20242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:52] [ns_1@127.0.0.1:<0.20231.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:53] [ns_1@127.0.0.1:<0.20159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:53] [ns_1@127.0.0.1:<0.20252.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:25:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20231.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:25:54] [ns_1@127.0.0.1:<0.20154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:54] [ns_1@127.0.0.1:<0.20257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:55] [ns_1@127.0.0.1:<0.20175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:55] [ns_1@127.0.0.1:<0.20265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:25:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20239.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:25:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20274.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:25:56] [ns_1@127.0.0.1:<0.20168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:56] [ns_1@127.0.0.1:<0.20269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:57] [ns_1@127.0.0.1:<0.20190.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:57] [ns_1@127.0.0.1:<0.20277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:58] [ns_1@127.0.0.1:<0.20185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:58] [ns_1@127.0.0.1:<0.20284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:59] [ns_1@127.0.0.1:<0.20202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:25:59] [ns_1@127.0.0.1:<0.20290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:25:59] [ns_1@127.0.0.1:<0.20301.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:25:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: 
[{'ns_1@127.0.0.1', [{last_heard,{1332,753956,612330}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38216672}, {processes,10177624}, {processes_used,8552144}, {system,28039048}, {atom,1306681}, {atom_used,1284164}, {binary,394512}, {code,12859877}, {ets,2420312}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,4978}, {memory_data,{4040077312,4014227456,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25120 kB\nBuffers: 62188 kB\nCached: 3530120 kB\nSwapCached: 0 kB\nActive: 310012 kB\nInactive: 3442784 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25120 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 160500 kB\nMapped: 24872 kB\nSlab: 134340 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614842880}, {buffered_memory,63680512}, {free_memory,25722880}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{4973348,0}}, {context_switches,{1483217,0}}, {garbage_collection,{784867,1092211980,0}}, {io,{{input,25582750},{output,56404823}}}, {reductions,{315197247,629574}}, {run_queue,0}, {runtime,{64160,140}}]}]}] [stats:error] [2012-03-26 2:26:00] [ns_1@127.0.0.1:<0.20197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20274.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20308.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:26:00] [ns_1@127.0.0.1:<0.20294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:00] 
[ns_1@127.0.0.1:<0.20301.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:01] [ns_1@127.0.0.1:<0.20216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:01] [ns_1@127.0.0.1:<0.20305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:01] [ns_1@127.0.0.1:<0.20301.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:02] [ns_1@127.0.0.1:<0.20210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:02] [ns_1@127.0.0.1:<0.20312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:02] [ns_1@127.0.0.1:<0.20301.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:03] [ns_1@127.0.0.1:<0.20227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:03] [ns_1@127.0.0.1:<0.20321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:26:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20301.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:26:04] [ns_1@127.0.0.1:<0.20223.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:04] [ns_1@127.0.0.1:<0.20327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:05] [ns_1@127.0.0.1:<0.20244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20308.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20341.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:26:05] [ns_1@127.0.0.1:<0.20334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:06] [ns_1@127.0.0.1:<0.20236.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:06] [ns_1@127.0.0.1:<0.20338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:07] [ns_1@127.0.0.1:<0.20259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:07] [ns_1@127.0.0.1:<0.20347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:08] [ns_1@127.0.0.1:<0.20254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:08] [ns_1@127.0.0.1:<0.20354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:09] [ns_1@127.0.0.1:<0.20271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:09] [ns_1@127.0.0.1:<0.20371.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:09] [ns_1@127.0.0.1:<0.20360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:10] [ns_1@127.0.0.1:<0.20267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20341.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20379.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:26:10] [ns_1@127.0.0.1:<0.20371.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:10] [ns_1@127.0.0.1:<0.20365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:11] [ns_1@127.0.0.1:<0.20286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:11] [ns_1@127.0.0.1:<0.20371.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:11] [ns_1@127.0.0.1:<0.20376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:12] [ns_1@127.0.0.1:<0.20281.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:12] [ns_1@127.0.0.1:<0.20371.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:12] [ns_1@127.0.0.1:<0.20383.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:13] [ns_1@127.0.0.1:<0.20296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:26:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20371.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:26:13] [ns_1@127.0.0.1:<0.20393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:14] [ns_1@127.0.0.1:<0.20292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:14] [ns_1@127.0.0.1:<0.20398.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:15] [ns_1@127.0.0.1:<0.20316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20379.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20413.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:26:15] [ns_1@127.0.0.1:<0.20405.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:16] [ns_1@127.0.0.1:<0.20309.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:16] [ns_1@127.0.0.1:<0.20410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:17] [ns_1@127.0.0.1:<0.20329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:17] [ns_1@127.0.0.1:<0.20418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:26:18] [ns_1@127.0.0.1:<0.20323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:18] [ns_1@127.0.0.1:<0.20424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:19] [ns_1@127.0.0.1:<0.20344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:19] [ns_1@127.0.0.1:<0.20439.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:20] [ns_1@127.0.0.1:<0.20431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:20] [ns_1@127.0.0.1:<0.20336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20413.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20447.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:26:20] [ns_1@127.0.0.1:<0.20439.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:21] [ns_1@127.0.0.1:<0.20435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:21] [ns_1@127.0.0.1:<0.20358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:21] [ns_1@127.0.0.1:<0.20439.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:22] [ns_1@127.0.0.1:<0.20444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:22] [ns_1@127.0.0.1:<0.20352.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:22] [ns_1@127.0.0.1:<0.20439.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:23] [ns_1@127.0.0.1:<0.20452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:23] [ns_1@127.0.0.1:<0.20374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:26:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20439.1> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:26:24] [ns_1@127.0.0.1:<0.20462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:24] [ns_1@127.0.0.1:<0.20363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:25] [ns_1@127.0.0.1:<0.20467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:25] [ns_1@127.0.0.1:<0.20391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:25] [ns_1@127.0.0.1:<0.20403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:25] [ns_1@127.0.0.1:<0.20416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20447.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20486.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:26:26] [ns_1@127.0.0.1:<0.20475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:26] [ns_1@127.0.0.1:<0.20381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:27] [ns_1@127.0.0.1:<0.20479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:27] [ns_1@127.0.0.1:<0.20429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:28] [ns_1@127.0.0.1:<0.20491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:28] [ns_1@127.0.0.1:<0.20396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:29] [ns_1@127.0.0.1:<0.20481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:29] [ns_1@127.0.0.1:<0.20442.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:29] [ns_1@127.0.0.1:<0.20512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" 
on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:30] [ns_1@127.0.0.1:<0.20504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:30] [ns_1@127.0.0.1:<0.20408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20486.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20521.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:26:30] [ns_1@127.0.0.1:<0.20512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:31] [ns_1@127.0.0.1:<0.20483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:31] [ns_1@127.0.0.1:<0.20460.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:31] [ns_1@127.0.0.1:<0.20512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:32] [ns_1@127.0.0.1:<0.20518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:32] [ns_1@127.0.0.1:<0.20422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:32] [ns_1@127.0.0.1:<0.20512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:33] [ns_1@127.0.0.1:<0.20498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:33] [ns_1@127.0.0.1:<0.20473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:26:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20512.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: 
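The alternating SUPERVISOR REPORT / PROGRESS REPORT pairs for hot_keys_keeper follow directly from its child spec: the worker is permanent with a 5000 ms shutdown, so every time its topkeys call dies with the same noproc exit, menelaus_sup restarts it, and the fresh pid is the one printed in the next PROGRESS REPORT. A minimal sketch of that restart behaviour under stated assumptions (one_for_one is assumed, since the log does not show the strategy, and gen_event:start_link/0 stands in for hot_keys_keeper:start_link/0 so the example runs on stock OTP):

-module(restart_demo_sup).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    %% Mirrors the shape of the hot_keys_keeper child spec in the
    %% reports above: a permanent worker with a 5000 ms shutdown.
    %% The start MFA is a stand-in, not the real hot_keys_keeper.
    Child = {demo_worker,
             {gen_event, start_link, []},
             permanent, 5000, worker, dynamic},
    %% one_for_one is an assumption, not taken from the log.
    {ok, {{one_for_one, 10, 10}, [Child]}}.

Starting restart_demo_sup and killing the child's pid (found via supervisor:which_children/1) produces the same child_terminated-then-restarted sequence with a new pid each time, for as long as the restart intensity (here 10 restarts in 10 seconds) is not exceeded.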
[stats:error] [2012-03-26 2:26:34] [ns_1@127.0.0.1:<0.20534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:34] [ns_1@127.0.0.1:<0.20433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:35] [ns_1@127.0.0.1:<0.20508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20521.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20552.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:26:35] [ns_1@127.0.0.1:<0.20489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:36] [ns_1@127.0.0.1:<0.20547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:36] [ns_1@127.0.0.1:<0.20450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:37] [ns_1@127.0.0.1:<0.20527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:37] [ns_1@127.0.0.1:<0.20502.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:38] [ns_1@127.0.0.1:<0.20563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:38] [ns_1@127.0.0.1:<0.20465.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:39] [ns_1@127.0.0.1:<0.20540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:39] [ns_1@127.0.0.1:<0.20580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:39] [ns_1@127.0.0.1:<0.20516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:40] [ns_1@127.0.0.1:<0.20573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20552.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.20588.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:26:40] [ns_1@127.0.0.1:<0.20580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:40] [ns_1@127.0.0.1:<0.20477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:41] [ns_1@127.0.0.1:<0.20553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:41] [ns_1@127.0.0.1:<0.20580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:41] [ns_1@127.0.0.1:<0.20532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:42] [ns_1@127.0.0.1:<0.20590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:42] [ns_1@127.0.0.1:<0.20580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:42] [ns_1@127.0.0.1:<0.20496.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:43] [ns_1@127.0.0.1:<0.20567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:26:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20580.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:26:43] [ns_1@127.0.0.1:<0.20545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:44] [ns_1@127.0.0.1:<0.20605.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:44] [ns_1@127.0.0.1:<0.20506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:45] [ns_1@127.0.0.1:<0.20583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20588.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20622.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:26:45] [ns_1@127.0.0.1:<0.20558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:46] [ns_1@127.0.0.1:<0.20617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:46] [ns_1@127.0.0.1:<0.20523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:47] [ns_1@127.0.0.1:<0.20600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:47] [ns_1@127.0.0.1:<0.20571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:48] [ns_1@127.0.0.1:<0.20631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:48] [ns_1@127.0.0.1:<0.20538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:49] [ns_1@127.0.0.1:<0.20612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:49] [ns_1@127.0.0.1:<0.20648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:49] [ns_1@127.0.0.1:<0.20585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:50] [ns_1@127.0.0.1:<0.20642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20622.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20656.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:26:50] [ns_1@127.0.0.1:<0.20648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:50] [ns_1@127.0.0.1:<0.20549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:51] [ns_1@127.0.0.1:<0.20625.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:51] [ns_1@127.0.0.1:<0.20648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:51] [ns_1@127.0.0.1:<0.20602.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:52] [ns_1@127.0.0.1:<0.20659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:52] [ns_1@127.0.0.1:<0.20648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:52] [ns_1@127.0.0.1:<0.20565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:53] [ns_1@127.0.0.1:<0.20638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:26:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20648.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:26:54] [ns_1@127.0.0.1:<0.20614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:54] [ns_1@127.0.0.1:<0.20674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:55] [ns_1@127.0.0.1:<0.20576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:55] [ns_1@127.0.0.1:<0.20651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:26:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20656.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:26:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20691.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:26:56] [ns_1@127.0.0.1:<0.20627.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:56] [ns_1@127.0.0.1:<0.20686.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:57] [ns_1@127.0.0.1:<0.20592.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:57] [ns_1@127.0.0.1:<0.20669.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:58] [ns_1@127.0.0.1:<0.20640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:58] [ns_1@127.0.0.1:<0.20701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:59] [ns_1@127.0.0.1:<0.20607.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:26:59] [ns_1@127.0.0.1:<0.20682.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:26:59] [ns_1@127.0.0.1:<0.20725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:26:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754016,640340}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38221480}, {processes,10138376}, {processes_used,8512896}, {system,28083104}, {atom,1306681}, {atom_used,1284164}, {binary,403432}, {code,12859877}, {ets,2449120}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5038}, {memory_data,{4040077312,4014354432,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24996 kB\nBuffers: 62256 kB\nCached: 3530276 kB\nSwapCached: 0 kB\nActive: 310112 kB\nInactive: 3442900 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24996 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 92 kB\nWriteback: 0 kB\nAnonPages: 160516 kB\nMapped: 24872 kB\nSlab: 134340 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615002624}, {buffered_memory,63750144}, {free_memory,25595904}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5033377,1}}, {context_switches,{1496209,0}}, {garbage_collection,{792016,1103362484,0}}, {io,{{input,25613339},{output,56842938}}}, {reductions,{317790978,629893}}, {run_queue,0}, {runtime,{64780,160}}]}]}] [stats:error] [2012-03-26 2:27:00] [ns_1@127.0.0.1:<0.20653.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:27:00] [ns_1@127.0.0.1:<0.20711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20691.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20734.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:27:00] [ns_1@127.0.0.1:<0.20725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:01] [ns_1@127.0.0.1:<0.20619.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:01] [ns_1@127.0.0.1:<0.20694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:01] [ns_1@127.0.0.1:<0.20725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:02] [ns_1@127.0.0.1:<0.20671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:02] [ns_1@127.0.0.1:<0.20736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:02] [ns_1@127.0.0.1:<0.20725.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:03] [ns_1@127.0.0.1:<0.20633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:03] [ns_1@127.0.0.1:<0.20707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:27:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20725.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:27:04] [ns_1@127.0.0.1:<0.20684.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:04] 
[ns_1@127.0.0.1:<0.20751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:05] [ns_1@127.0.0.1:<0.20644.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:05] [ns_1@127.0.0.1:<0.20729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20734.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20773.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:27:06] [ns_1@127.0.0.1:<0.20696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:06] [ns_1@127.0.0.1:<0.20768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:07] [ns_1@127.0.0.1:<0.20661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:07] [ns_1@127.0.0.1:<0.20745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:08] [ns_1@127.0.0.1:<0.20709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:08] [ns_1@127.0.0.1:<0.20784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:09] [ns_1@127.0.0.1:<0.20676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:09] [ns_1@127.0.0.1:<0.20764.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:09] [ns_1@127.0.0.1:<0.20801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:10] [ns_1@127.0.0.1:<0.20731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20773.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:27:10] [ns_1@127.0.0.1:<0.20795.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:10] [ns_1@127.0.0.1:<0.20801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:11] [ns_1@127.0.0.1:<0.20688.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:11] [ns_1@127.0.0.1:<0.20777.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:11] [ns_1@127.0.0.1:<0.20801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:12] [ns_1@127.0.0.1:<0.20747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:12] [ns_1@127.0.0.1:<0.20813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:12] [ns_1@127.0.0.1:<0.20801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:13] [ns_1@127.0.0.1:<0.20703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:27:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20801.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:27:13] [ns_1@127.0.0.1:<0.20790.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:14] [ns_1@127.0.0.1:<0.20766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:14] [ns_1@127.0.0.1:<0.20828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:15] [ns_1@127.0.0.1:<0.20713.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20809.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20843.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:27:15] [ns_1@127.0.0.1:<0.20806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:16] [ns_1@127.0.0.1:<0.20782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:16] [ns_1@127.0.0.1:<0.20840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:17] [ns_1@127.0.0.1:<0.20738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:17] [ns_1@127.0.0.1:<0.20823.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:18] [ns_1@127.0.0.1:<0.20792.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:18] [ns_1@127.0.0.1:<0.20854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:19] [ns_1@127.0.0.1:<0.20753.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:19] [ns_1@127.0.0.1:<0.20869.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:19] [ns_1@127.0.0.1:<0.20835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:20] [ns_1@127.0.0.1:<0.20810.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20843.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20877.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:27:20] [ns_1@127.0.0.1:<0.20869.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:20] [ns_1@127.0.0.1:<0.20865.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:21] [ns_1@127.0.0.1:<0.20770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:21] [ns_1@127.0.0.1:<0.20869.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:21] [ns_1@127.0.0.1:<0.20848.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:22] [ns_1@127.0.0.1:<0.20825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:22] [ns_1@127.0.0.1:<0.20869.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:22] [ns_1@127.0.0.1:<0.20882.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:23] [ns_1@127.0.0.1:<0.20786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:27:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20869.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:27:23] [ns_1@127.0.0.1:<0.20861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:24] [ns_1@127.0.0.1:<0.20837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:24] [ns_1@127.0.0.1:<0.20897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:25] [ns_1@127.0.0.1:<0.20797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20877.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:27:25] [ns_1@127.0.0.1:<0.20874.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:25] [ns_1@127.0.0.1:<0.20892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:25] [ns_1@127.0.0.1:<0.20905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:26] [ns_1@127.0.0.1:<0.20852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
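The recurring stats_reader:log_bad_responses entries report that the local node itself failed to answer a stats request. One generic OTP pattern that yields exactly this kind of per-node failure list is gen_server:multi_call/4, which returns the nodes whose registered server was missing or did not reply within the timeout; whether stats_reader is fed by this call or by another mechanism is not shown in the log, so the sketch below is only an illustration (some_stats_server and latest_sample are made-up names, never started or handled here):

-module(bad_responses_demo).
-export([run/0]).

%% gen_server:multi_call/4 returns {Replies, BadNodes}. Nodes where the
%% named server is not registered, or does not answer in time, end up
%% in BadNodes; here nothing registers some_stats_server, so the local
%% node lands in BadNodes.
run() ->
    {Replies, BadNodes} =
        gen_server:multi_call([node()], some_stats_server,
                              latest_sample, 1000),
    case BadNodes of
        []    -> {ok, Replies};
        [_|_] -> {some_nodes_did_not_respond, BadNodes}
    end.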
[stats:error] [2012-03-26 2:27:26] [ns_1@127.0.0.1:<0.20909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:27] [ns_1@127.0.0.1:<0.20817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:27] [ns_1@127.0.0.1:<0.20921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:28] [ns_1@127.0.0.1:<0.20863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:29] [ns_1@127.0.0.1:<0.20928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:29] [ns_1@127.0.0.1:<0.20833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:29] [ns_1@127.0.0.1:<0.20942.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:30] [ns_1@127.0.0.1:<0.20934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:30] [ns_1@127.0.0.1:<0.20880.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20912.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20951.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:27:30] [ns_1@127.0.0.1:<0.20942.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:31] [ns_1@127.0.0.1:<0.20938.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:31] [ns_1@127.0.0.1:<0.20846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:31] [ns_1@127.0.0.1:<0.20942.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:32] [ns_1@127.0.0.1:<0.20948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:32] [ns_1@127.0.0.1:<0.20895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:32] [ns_1@127.0.0.1:<0.20942.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:33] [ns_1@127.0.0.1:<0.20955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:33] [ns_1@127.0.0.1:<0.20859.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] 
Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:27:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.20942.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:27:34] [ns_1@127.0.0.1:<0.20964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:34] [ns_1@127.0.0.1:<0.20907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:35] [ns_1@127.0.0.1:<0.20970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:35] [ns_1@127.0.0.1:<0.20872.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20951.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.20984.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:27:36] [ns_1@127.0.0.1:<0.20977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:36] [ns_1@127.0.0.1:<0.20926.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:37] [ns_1@127.0.0.1:<0.20981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:37] [ns_1@127.0.0.1:<0.20890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:38] [ns_1@127.0.0.1:<0.20990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:38] [ns_1@127.0.0.1:<0.20936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:39] [ns_1@127.0.0.1:<0.20997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:39] [ns_1@127.0.0.1:<0.20903.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:39] [ns_1@127.0.0.1:<0.21012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:40] [ns_1@127.0.0.1:<0.21003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:40] [ns_1@127.0.0.1:<0.20953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.20984.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:27:40] [ns_1@127.0.0.1:<0.21012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:41] [ns_1@127.0.0.1:<0.21008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:41] [ns_1@127.0.0.1:<0.20915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:41] [ns_1@127.0.0.1:<0.21012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:42] [ns_1@127.0.0.1:<0.21017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:42] [ns_1@127.0.0.1:<0.20968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:42] [ns_1@127.0.0.1:<0.21012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:43] [ns_1@127.0.0.1:<0.21026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:43] [ns_1@127.0.0.1:<0.20917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:27:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21012.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: 
[<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:27:44] [ns_1@127.0.0.1:<0.21034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:44] [ns_1@127.0.0.1:<0.20979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:45] [ns_1@127.0.0.1:<0.21039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21052.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:27:45] [ns_1@127.0.0.1:<0.20919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:46] [ns_1@127.0.0.1:<0.21046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:46] [ns_1@127.0.0.1:<0.20995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:47] [ns_1@127.0.0.1:<0.21053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:47] [ns_1@127.0.0.1:<0.20932.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:48] [ns_1@127.0.0.1:<0.21061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:48] [ns_1@127.0.0.1:<0.21006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:49] [ns_1@127.0.0.1:<0.21065.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:49] [ns_1@127.0.0.1:<0.21078.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:49] [ns_1@127.0.0.1:<0.20946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:50] [ns_1@127.0.0.1:<0.21072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21052.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:50] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21086.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:27:50] [ns_1@127.0.0.1:<0.21078.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:50] [ns_1@127.0.0.1:<0.21022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:51] [ns_1@127.0.0.1:<0.21081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:51] [ns_1@127.0.0.1:<0.21078.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:51] [ns_1@127.0.0.1:<0.20962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:52] [ns_1@127.0.0.1:<0.21088.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:52] [ns_1@127.0.0.1:<0.21078.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:52] [ns_1@127.0.0.1:<0.21037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:53] [ns_1@127.0.0.1:<0.21099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:27:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21078.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:27:53] [ns_1@127.0.0.1:<0.20975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:54] [ns_1@127.0.0.1:<0.21104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:54] [ns_1@127.0.0.1:<0.21049.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:55] [ns_1@127.0.0.1:<0.21112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:27:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21086.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:27:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21121.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:27:55] [ns_1@127.0.0.1:<0.20988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:56] [ns_1@127.0.0.1:<0.21116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:56] [ns_1@127.0.0.1:<0.21063.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:57] [ns_1@127.0.0.1:<0.21124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:57] [ns_1@127.0.0.1:<0.21001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:58] [ns_1@127.0.0.1:<0.21131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:58] [ns_1@127.0.0.1:<0.21074.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:27:59] [ns_1@127.0.0.1:<0.21137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:27:59] [ns_1@127.0.0.1:<0.21148.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:27:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754076,666325}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38110168}, {processes,10058688}, {processes_used,8433208}, {system,28051480}, {atom,1306681}, {atom_used,1284164}, {binary,392944}, {code,12859877}, {ets,2422544}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5098}, {memory_data,{4040077312,4014481408,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24616 kB\nBuffers: 62388 kB\nCached: 3530004 kB\nSwapCached: 0 kB\nActive: 310208 kB\nInactive: 3442740 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24616 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160552 kB\nMapped: 24872 kB\nSlab: 134348 
kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614724096}, {buffered_memory,63885312}, {free_memory,25206784}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5093403,0}}, {context_switches,{1509915,0}}, {garbage_collection,{799821,1114893247,0}}, {io,{{input,25889454},{output,57853743}}}, {reductions,{320836695,630173}}, {run_queue,0}, {runtime,{65420,130}}]}]}] [stats:error] [2012-03-26 2:27:59] [ns_1@127.0.0.1:<0.21015.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:00] [ns_1@127.0.0.1:<0.21141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21121.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21157.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:28:00] [ns_1@127.0.0.1:<0.21148.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:00] [ns_1@127.0.0.1:<0.21091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:01] [ns_1@127.0.0.1:<0.21152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:01] [ns_1@127.0.0.1:<0.21148.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:01] [ns_1@127.0.0.1:<0.21032.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:02] [ns_1@127.0.0.1:<0.21159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:02] [ns_1@127.0.0.1:<0.21148.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:02] [ns_1@127.0.0.1:<0.21106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:03] [ns_1@127.0.0.1:<0.21168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] 
[2012-03-26 2:28:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21148.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:28:03] [ns_1@127.0.0.1:<0.21044.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:04] [ns_1@127.0.0.1:<0.21174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:05] [ns_1@127.0.0.1:<0.21118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:05] [ns_1@127.0.0.1:<0.21181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21157.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21190.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:28:06] [ns_1@127.0.0.1:<0.21057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:06] [ns_1@127.0.0.1:<0.21185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:07] [ns_1@127.0.0.1:<0.21133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:07] [ns_1@127.0.0.1:<0.21194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:08] [ns_1@127.0.0.1:<0.21070.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:08] [ns_1@127.0.0.1:<0.21201.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:09] [ns_1@127.0.0.1:<0.21143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:09] [ns_1@127.0.0.1:<0.21207.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:09] [ns_1@127.0.0.1:<0.21220.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:28:10] [ns_1@127.0.0.1:<0.21083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:10] [ns_1@127.0.0.1:<0.21212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21190.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21228.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:28:10] [ns_1@127.0.0.1:<0.21220.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:11] [ns_1@127.0.0.1:<0.21161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:11] [ns_1@127.0.0.1:<0.21223.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:11] [ns_1@127.0.0.1:<0.21220.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:12] [ns_1@127.0.0.1:<0.21101.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:12] [ns_1@127.0.0.1:<0.21230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:12] [ns_1@127.0.0.1:<0.21220.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:13] [ns_1@127.0.0.1:<0.21176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:13] [ns_1@127.0.0.1:<0.21240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:28:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21220.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] 
[2012-03-26 2:28:14] [ns_1@127.0.0.1:<0.21114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:14] [ns_1@127.0.0.1:<0.21245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:15] [ns_1@127.0.0.1:<0.21187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:15] [ns_1@127.0.0.1:<0.21252.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21228.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:28:16] [ns_1@127.0.0.1:<0.21126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:16] [ns_1@127.0.0.1:<0.21257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:17] [ns_1@127.0.0.1:<0.21203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:17] [ns_1@127.0.0.1:<0.21265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:18] [ns_1@127.0.0.1:<0.21139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:18] [ns_1@127.0.0.1:<0.21271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:19] [ns_1@127.0.0.1:<0.21214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:19] [ns_1@127.0.0.1:<0.21278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:19] [ns_1@127.0.0.1:<0.21288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:20] [ns_1@127.0.0.1:<0.21154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.21294.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:28:20] [ns_1@127.0.0.1:<0.21282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:20] [ns_1@127.0.0.1:<0.21288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:21] [ns_1@127.0.0.1:<0.21232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:21] [ns_1@127.0.0.1:<0.21291.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:21] [ns_1@127.0.0.1:<0.21288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:22] [ns_1@127.0.0.1:<0.21170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:22] [ns_1@127.0.0.1:<0.21299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:22] [ns_1@127.0.0.1:<0.21288.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:23] [ns_1@127.0.0.1:<0.21247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:23] [ns_1@127.0.0.1:<0.21309.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:28:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21288.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:28:24] [ns_1@127.0.0.1:<0.21183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:24] [ns_1@127.0.0.1:<0.21314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:25] [ns_1@127.0.0.1:<0.21259.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21294.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21329.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:28:25] [ns_1@127.0.0.1:<0.21322.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:26] [ns_1@127.0.0.1:<0.21196.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:26] [ns_1@127.0.0.1:<0.21209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:26] [ns_1@127.0.0.1:<0.21225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:26] [ns_1@127.0.0.1:<0.21242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:26] [ns_1@127.0.0.1:<0.21254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:26] [ns_1@127.0.0.1:<0.21326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:27] [ns_1@127.0.0.1:<0.21273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:27] [ns_1@127.0.0.1:<0.21334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:28] [ns_1@127.0.0.1:<0.21267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:28] [ns_1@127.0.0.1:<0.21349.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:29] [ns_1@127.0.0.1:<0.21284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:29] [ns_1@127.0.0.1:<0.21363.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:29] [ns_1@127.0.0.1:<0.21336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:30] [ns_1@127.0.0.1:<0.21280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21329.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21372.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:28:30] [ns_1@127.0.0.1:<0.21363.1>:ns_janitor:wait_for_memcached:278] 
Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:30] [ns_1@127.0.0.1:<0.21359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:31] [ns_1@127.0.0.1:<0.21304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:31] [ns_1@127.0.0.1:<0.21363.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:31] [ns_1@127.0.0.1:<0.21338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:32] [ns_1@127.0.0.1:<0.21295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:32] [ns_1@127.0.0.1:<0.21363.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:32] [ns_1@127.0.0.1:<0.21376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:33] [ns_1@127.0.0.1:<0.21316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:28:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21363.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:28:33] [ns_1@127.0.0.1:<0.21340.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:34] [ns_1@127.0.0.1:<0.21311.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:34] [ns_1@127.0.0.1:<0.21391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:35] [ns_1@127.0.0.1:<0.21332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21372.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,menelaus_sup} started: [{pid,<0.21405.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:28:35] [ns_1@127.0.0.1:<0.21342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:36] [ns_1@127.0.0.1:<0.21324.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:36] [ns_1@127.0.0.1:<0.21402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:37] [ns_1@127.0.0.1:<0.21353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:38] [ns_1@127.0.0.1:<0.21355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:38] [ns_1@127.0.0.1:<0.21347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:39] [ns_1@127.0.0.1:<0.21418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:39] [ns_1@127.0.0.1:<0.21367.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:39] [ns_1@127.0.0.1:<0.21433.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:40] [ns_1@127.0.0.1:<0.21369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:40] [ns_1@127.0.0.1:<0.21357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21405.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21441.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:28:40] [ns_1@127.0.0.1:<0.21433.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:41] [ns_1@127.0.0.1:<0.21429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:41] [ns_1@127.0.0.1:<0.21383.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:41] [ns_1@127.0.0.1:<0.21433.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:42] [ns_1@127.0.0.1:<0.21385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:42] [ns_1@127.0.0.1:<0.21374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:42] 
[ns_1@127.0.0.1:<0.21433.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:43] [ns_1@127.0.0.1:<0.21445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:43] [ns_1@127.0.0.1:<0.21396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:28:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21433.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:28:44] [ns_1@127.0.0.1:<0.21398.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:44] [ns_1@127.0.0.1:<0.21389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:45] [ns_1@127.0.0.1:<0.21460.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:45] [ns_1@127.0.0.1:<0.21409.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21441.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21475.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:28:46] [ns_1@127.0.0.1:<0.21411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:46] [ns_1@127.0.0.1:<0.21400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:47] [ns_1@127.0.0.1:<0.21472.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:47] [ns_1@127.0.0.1:<0.21422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:48] 
[ns_1@127.0.0.1:<0.21424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:48] [ns_1@127.0.0.1:<0.21416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:49] [ns_1@127.0.0.1:<0.21486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:49] [ns_1@127.0.0.1:<0.21436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:49] [ns_1@127.0.0.1:<0.21501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:50] [ns_1@127.0.0.1:<0.21438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:50] [ns_1@127.0.0.1:<0.21427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21475.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21509.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:28:50] [ns_1@127.0.0.1:<0.21501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:51] [ns_1@127.0.0.1:<0.21497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:51] [ns_1@127.0.0.1:<0.21453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:51] [ns_1@127.0.0.1:<0.21501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:52] [ns_1@127.0.0.1:<0.21455.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:52] [ns_1@127.0.0.1:<0.21443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:52] [ns_1@127.0.0.1:<0.21501.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:53] [ns_1@127.0.0.1:<0.21514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:53] [ns_1@127.0.0.1:<0.21465.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:28:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21501.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:28:54] [ns_1@127.0.0.1:<0.21467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:54] [ns_1@127.0.0.1:<0.21458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:55] [ns_1@127.0.0.1:<0.21529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:28:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21509.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:28:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21542.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:28:55] [ns_1@127.0.0.1:<0.21478.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:56] [ns_1@127.0.0.1:<0.21480.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:56] [ns_1@127.0.0.1:<0.21470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:57] [ns_1@127.0.0.1:<0.21543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:57] [ns_1@127.0.0.1:<0.21491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:58] [ns_1@127.0.0.1:<0.21493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:58] [ns_1@127.0.0.1:<0.21484.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:28:59] [ns_1@127.0.0.1:<0.21558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:28:59] [ns_1@127.0.0.1:<0.21569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:28:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754136,693324}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, 
{incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38242088}, {processes,10148072}, {processes_used,8522592}, {system,28094016}, {atom,1306681}, {atom_used,1284164}, {binary,400600}, {code,12859877}, {ets,2451176}]}, {system_stats, [{cpu_utilization_rate,25.06265664160401}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5158}, {memory_data,{4040077312,4014870528,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24624 kB\nBuffers: 62492 kB\nCached: 3530160 kB\nSwapCached: 0 kB\nActive: 310292 kB\nInactive: 3442908 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24624 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160560 kB\nMapped: 24872 kB\nSlab: 134316 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580356 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3614883840}, {buffered_memory,63991808}, {free_memory,25214976}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5153429,0}}, {context_switches,{1523069,0}}, {garbage_collection,{807400,1126371926,0}}, {io,{{input,25920061},{output,58300854}}}, {reductions,{323510317,651307}}, {run_queue,0}, {runtime,{65980,130}}]}]}] [stats:error] [2012-03-26 2:28:59] [ns_1@127.0.0.1:<0.21504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:00] [ns_1@127.0.0.1:<0.21506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21542.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21578.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:29:00] [ns_1@127.0.0.1:<0.21569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:29:00] [ns_1@127.0.0.1:<0.21495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:01] [ns_1@127.0.0.1:<0.21572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:01] [ns_1@127.0.0.1:<0.21569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:01] [ns_1@127.0.0.1:<0.21522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:02] [ns_1@127.0.0.1:<0.21524.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:02] [ns_1@127.0.0.1:<0.21569.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:02] [ns_1@127.0.0.1:<0.21512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:03] [ns_1@127.0.0.1:<0.21589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:29:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21569.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:29:03] [ns_1@127.0.0.1:<0.21535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:04] [ns_1@127.0.0.1:<0.21537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:04] [ns_1@127.0.0.1:<0.21527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:05] [ns_1@127.0.0.1:<0.21602.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21578.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21611.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:29:05] [ns_1@127.0.0.1:<0.21547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:06] [ns_1@127.0.0.1:<0.21551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:06] [ns_1@127.0.0.1:<0.21539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:07] [ns_1@127.0.0.1:<0.21615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:07] [ns_1@127.0.0.1:<0.21560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:08] [ns_1@127.0.0.1:<0.21562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:08] [ns_1@127.0.0.1:<0.21554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:09] [ns_1@127.0.0.1:<0.21628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:09] [ns_1@127.0.0.1:<0.21641.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:09] [ns_1@127.0.0.1:<0.21575.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:10] [ns_1@127.0.0.1:<0.21580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21611.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21649.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:29:10] [ns_1@127.0.0.1:<0.21641.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:10] [ns_1@127.0.0.1:<0.21564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:11] [ns_1@127.0.0.1:<0.21644.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:11] [ns_1@127.0.0.1:<0.21641.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:11] [ns_1@127.0.0.1:<0.21591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:12] [ns_1@127.0.0.1:<0.21595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:12] [ns_1@127.0.0.1:<0.21641.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:12] [ns_1@127.0.0.1:<0.21582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:13] [ns_1@127.0.0.1:<0.21661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:29:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21641.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:29:14] [ns_1@127.0.0.1:<0.21604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:14] [ns_1@127.0.0.1:<0.21606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:15] [ns_1@127.0.0.1:<0.21597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:15] [ns_1@127.0.0.1:<0.21673.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21649.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21683.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:29:16] [ns_1@127.0.0.1:<0.21617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:16] [ns_1@127.0.0.1:<0.21622.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:17] [ns_1@127.0.0.1:<0.21608.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:17] [ns_1@127.0.0.1:<0.21686.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:18] [ns_1@127.0.0.1:<0.21630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:18] [ns_1@127.0.0.1:<0.21633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:19] [ns_1@127.0.0.1:<0.21624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:19] [ns_1@127.0.0.1:<0.21699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:19] [ns_1@127.0.0.1:<0.21709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:20] [ns_1@127.0.0.1:<0.21646.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:20] [ns_1@127.0.0.1:<0.21651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21683.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21717.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:29:20] [ns_1@127.0.0.1:<0.21709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:21] [ns_1@127.0.0.1:<0.21635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:21] [ns_1@127.0.0.1:<0.21712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:21] [ns_1@127.0.0.1:<0.21709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:22] [ns_1@127.0.0.1:<0.21663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:22] [ns_1@127.0.0.1:<0.21666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:22] [ns_1@127.0.0.1:<0.21709.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:23] [ns_1@127.0.0.1:<0.21653.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:23] [ns_1@127.0.0.1:<0.21730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:29:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21709.1> registered_name: [] 
exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:29:24] [ns_1@127.0.0.1:<0.21675.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:24] [ns_1@127.0.0.1:<0.21678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:25] [ns_1@127.0.0.1:<0.21668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:25] [ns_1@127.0.0.1:<0.21743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21717.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21752.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:29:26] [ns_1@127.0.0.1:<0.21688.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:26] [ns_1@127.0.0.1:<0.21692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:26] [ns_1@127.0.0.1:<0.21735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:26] [ns_1@127.0.0.1:<0.21747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:26] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:29:26] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:29:26] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:29:26] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:29:26] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:warn] [2012-03-26 2:29:31] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
[ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:<0.21680.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:<0.21732.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:<0.21796.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 2:29:31] [ns_1@127.0.0.1:<0.21745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:29:31] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:29:33: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] 
[2012-03-26 2:29:32] [ns_1@127.0.0.1:<0.21757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:32] [ns_1@127.0.0.1:<0.21796.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:32] [ns_1@127.0.0.1:<0.21694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:33] [ns_1@127.0.0.1:<0.21705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:33] [ns_1@127.0.0.1:<0.21796.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:33] [ns_1@127.0.0.1:<0.21808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:34] [ns_1@127.0.0.1:<0.21761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:34] [ns_1@127.0.0.1:<0.21796.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:34] [ns_1@127.0.0.1:<0.21814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:35] [ns_1@127.0.0.1:<0.21722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21752.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21829.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:29:35] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:29:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21796.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:29:35] [ns_1@127.0.0.1:<0.21821.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:36] 
[ns_1@127.0.0.1:<0.21764.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:36] [ns_1@127.0.0.1:<0.21826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:37] [ns_1@127.0.0.1:<0.21737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:37] [ns_1@127.0.0.1:<0.21836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:38] [ns_1@127.0.0.1:<0.21767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:38] [ns_1@127.0.0.1:<0.21843.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:39] [ns_1@127.0.0.1:<0.21749.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:39] [ns_1@127.0.0.1:<0.21858.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:39] [ns_1@127.0.0.1:<0.21849.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:40] [ns_1@127.0.0.1:<0.21769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21829.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21866.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:29:40] [ns_1@127.0.0.1:<0.21858.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:40] [ns_1@127.0.0.1:<0.21854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:41] [ns_1@127.0.0.1:<0.21789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:41] [ns_1@127.0.0.1:<0.21858.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:41] [ns_1@127.0.0.1:<0.21863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:42] [ns_1@127.0.0.1:<0.21771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:42] [ns_1@127.0.0.1:<0.21858.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:42] [ns_1@127.0.0.1:<0.21870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:43] [ns_1@127.0.0.1:<0.21790.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:29:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21858.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:29:43] [ns_1@127.0.0.1:<0.21880.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:44] [ns_1@127.0.0.1:<0.21811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:44] [ns_1@127.0.0.1:<0.21885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:45] [ns_1@127.0.0.1:<0.21791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21866.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21900.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:29:45] [ns_1@127.0.0.1:<0.21892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:46] [ns_1@127.0.0.1:<0.21824.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:46] [ns_1@127.0.0.1:<0.21897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:47] [ns_1@127.0.0.1:<0.21792.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:48] [ns_1@127.0.0.1:<0.21905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:48] [ns_1@127.0.0.1:<0.21841.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:49] [ns_1@127.0.0.1:<0.21911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:29:49] [ns_1@127.0.0.1:<0.21799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:49] [ns_1@127.0.0.1:<0.21926.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:50] [ns_1@127.0.0.1:<0.21918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:50] [ns_1@127.0.0.1:<0.21852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21900.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21934.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:29:50] [ns_1@127.0.0.1:<0.21926.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:51] [ns_1@127.0.0.1:<0.21922.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:51] [ns_1@127.0.0.1:<0.21819.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:51] [ns_1@127.0.0.1:<0.21926.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:52] [ns_1@127.0.0.1:<0.21931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:52] [ns_1@127.0.0.1:<0.21868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:52] [ns_1@127.0.0.1:<0.21926.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:53] [ns_1@127.0.0.1:<0.21939.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:53] [ns_1@127.0.0.1:<0.21834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:29:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.21926.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:29:54] [ns_1@127.0.0.1:<0.21949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:54] [ns_1@127.0.0.1:<0.21883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:55] [ns_1@127.0.0.1:<0.21954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:55] [ns_1@127.0.0.1:<0.21847.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:29:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21934.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:29:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.21969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:29:56] [ns_1@127.0.0.1:<0.21962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:56] [ns_1@127.0.0.1:<0.21895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:57] [ns_1@127.0.0.1:<0.21966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:57] [ns_1@127.0.0.1:<0.21861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:58] [ns_1@127.0.0.1:<0.21974.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:58] [ns_1@127.0.0.1:<0.21909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:59] [ns_1@127.0.0.1:<0.21981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:29:59] [ns_1@127.0.0.1:<0.21878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:29:59] [ns_1@127.0.0.1:<0.22011.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:29:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754196,719215}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38051448}, {processes,9957568}, {processes_used,8332088}, {system,28093880}, {atom,1306681}, {atom_used,1284164}, {binary,422368}, 
{code,12859877}, {ets,2422312}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5218}, {memory_data,{4040077312,4014870528,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24112 kB\nBuffers: 62600 kB\nCached: 3530320 kB\nSwapCached: 0 kB\nActive: 310400 kB\nInactive: 3443068 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24112 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160572 kB\nMapped: 24872 kB\nSlab: 134344 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580344 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615047680}, {buffered_memory,64102400}, {free_memory,24690688}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5213456,0}}, {context_switches,{1536283,0}}, {garbage_collection,{814666,1137985000,0}}, {io,{{input,25956544},{output,58784995}}}, {reductions,{326140818,631540}}, {run_queue,0}, {runtime,{66620,160}}]}]}] [stats:error] [2012-03-26 2:30:00] [ns_1@127.0.0.1:<0.21987.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:00] [ns_1@127.0.0.1:<0.21920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.21969.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:30:00] [ns_1@127.0.0.1:<0.22011.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:01] [ns_1@127.0.0.1:<0.22006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:01] [ns_1@127.0.0.1:<0.21890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:01] 
[ns_1@127.0.0.1:<0.22011.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:02] [ns_1@127.0.0.1:<0.22017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:02] [ns_1@127.0.0.1:<0.21937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:02] [ns_1@127.0.0.1:<0.22011.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:03] [ns_1@127.0.0.1:<0.22024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:03] [ns_1@127.0.0.1:<0.21903.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:30:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22011.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:30:04] [ns_1@127.0.0.1:<0.22033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:04] [ns_1@127.0.0.1:<0.21952.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:05] [ns_1@127.0.0.1:<0.22039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22020.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22051.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:30:05] [ns_1@127.0.0.1:<0.21916.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:06] [ns_1@127.0.0.1:<0.22046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:06] 
[ns_1@127.0.0.1:<0.21964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:07] [ns_1@127.0.0.1:<0.22052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:07] [ns_1@127.0.0.1:<0.21929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:08] [ns_1@127.0.0.1:<0.22062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:08] [ns_1@127.0.0.1:<0.21979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:09] [ns_1@127.0.0.1:<0.22066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:09] [ns_1@127.0.0.1:<0.22081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:09] [ns_1@127.0.0.1:<0.21947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:10] [ns_1@127.0.0.1:<0.22072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22051.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22089.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:30:10] [ns_1@127.0.0.1:<0.22081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:10] [ns_1@127.0.0.1:<0.21989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:11] [ns_1@127.0.0.1:<0.22084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:11] [ns_1@127.0.0.1:<0.22081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:11] [ns_1@127.0.0.1:<0.21960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:12] [ns_1@127.0.0.1:<0.22091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:12] [ns_1@127.0.0.1:<0.22081.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:12] [ns_1@127.0.0.1:<0.22022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:13] [ns_1@127.0.0.1:<0.22101.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:30:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22081.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:30:13] [ns_1@127.0.0.1:<0.21972.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:14] [ns_1@127.0.0.1:<0.22106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:14] [ns_1@127.0.0.1:<0.22037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:15] [ns_1@127.0.0.1:<0.22113.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22089.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22123.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:30:15] [ns_1@127.0.0.1:<0.21985.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:16] [ns_1@127.0.0.1:<0.22118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:16] [ns_1@127.0.0.1:<0.22048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:17] [ns_1@127.0.0.1:<0.22126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:17] [ns_1@127.0.0.1:<0.22015.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:18] [ns_1@127.0.0.1:<0.22132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:18] [ns_1@127.0.0.1:<0.22064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:19] [ns_1@127.0.0.1:<0.22139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:30:19] [ns_1@127.0.0.1:<0.22149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:19] [ns_1@127.0.0.1:<0.22031.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:20] [ns_1@127.0.0.1:<0.22143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22123.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22157.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:30:20] [ns_1@127.0.0.1:<0.22149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:20] [ns_1@127.0.0.1:<0.22075.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:21] [ns_1@127.0.0.1:<0.22152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:21] [ns_1@127.0.0.1:<0.22149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:21] [ns_1@127.0.0.1:<0.22044.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:22] [ns_1@127.0.0.1:<0.22160.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:22] [ns_1@127.0.0.1:<0.22149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:23] [ns_1@127.0.0.1:<0.22093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:23] [ns_1@127.0.0.1:<0.22170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:30:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22149.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:30:24] [ns_1@127.0.0.1:<0.22057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:24] [ns_1@127.0.0.1:<0.22175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:25] [ns_1@127.0.0.1:<0.22108.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:25] [ns_1@127.0.0.1:<0.22183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22157.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22192.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:30:26] [ns_1@127.0.0.1:<0.22070.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:26] [ns_1@127.0.0.1:<0.22187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:27] [ns_1@127.0.0.1:<0.22120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:27] [ns_1@127.0.0.1:<0.22195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:28] [ns_1@127.0.0.1:<0.22086.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:28] [ns_1@127.0.0.1:<0.22202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:29] [ns_1@127.0.0.1:<0.22134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:29] [ns_1@127.0.0.1:<0.22208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:29] [ns_1@127.0.0.1:<0.22218.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:30] [ns_1@127.0.0.1:<0.22103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:30] [ns_1@127.0.0.1:<0.22212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22192.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:30:30] [ns_1@127.0.0.1:<0.22218.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:31] [ns_1@127.0.0.1:<0.22145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:31] [ns_1@127.0.0.1:<0.22222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:31] [ns_1@127.0.0.1:<0.22218.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:32] [ns_1@127.0.0.1:<0.22115.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:32] [ns_1@127.0.0.1:<0.22218.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:30:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22218.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:30:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22246.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:30:39] [ns_1@127.0.0.1:<0.22258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22246.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:30:40] [ns_1@127.0.0.1:<0.22258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:41] [ns_1@127.0.0.1:<0.22258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:42] [ns_1@127.0.0.1:<0.22128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:42] [ns_1@127.0.0.1:<0.22229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:42] [ns_1@127.0.0.1:<0.22258.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:43] [ns_1@127.0.0.1:<0.22162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:43] [ns_1@127.0.0.1:<0.22238.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:30:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22258.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:30:44] [ns_1@127.0.0.1:<0.22141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:44] [ns_1@127.0.0.1:<0.22275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:45] [ns_1@127.0.0.1:<0.22177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:45] [ns_1@127.0.0.1:<0.22270.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22292.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:30:46] [ns_1@127.0.0.1:<0.22154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:46] [ns_1@127.0.0.1:<0.22287.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:47] [ns_1@127.0.0.1:<0.22189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:47] [ns_1@127.0.0.1:<0.22282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:48] [ns_1@127.0.0.1:<0.22172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:48] [ns_1@127.0.0.1:<0.22301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:49] [ns_1@127.0.0.1:<0.22204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:49] [ns_1@127.0.0.1:<0.22295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:49] [ns_1@127.0.0.1:<0.22318.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:50] [ns_1@127.0.0.1:<0.22185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22292.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22324.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:30:50] [ns_1@127.0.0.1:<0.22312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:50] [ns_1@127.0.0.1:<0.22318.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:51] [ns_1@127.0.0.1:<0.22214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:51] [ns_1@127.0.0.1:<0.22318.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:51] [ns_1@127.0.0.1:<0.22308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:52] [ns_1@127.0.0.1:<0.22197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:52] [ns_1@127.0.0.1:<0.22318.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:52] [ns_1@127.0.0.1:<0.22329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:53] [ns_1@127.0.0.1:<0.22231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:30:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22318.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:30:53] [ns_1@127.0.0.1:<0.22321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:54] [ns_1@127.0.0.1:<0.22210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:54] [ns_1@127.0.0.1:<0.22344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:55] [ns_1@127.0.0.1:<0.22277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:30:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22324.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:30:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22359.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:30:55] [ns_1@127.0.0.1:<0.22339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:56] [ns_1@127.0.0.1:<0.22224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:30:56] [ns_1@127.0.0.1:<0.22356.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:57] [ns_1@127.0.0.1:<0.22289.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:57] [ns_1@127.0.0.1:<0.22352.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:58] [ns_1@127.0.0.1:<0.22272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:58] [ns_1@127.0.0.1:<0.22371.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:30:59] [ns_1@127.0.0.1:<0.22303.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:30:59] [ns_1@127.0.0.1:<0.22386.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:30:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754256,748317}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38349416}, {processes,10214256}, {processes_used,8588776}, {system,28135160}, {atom,1306681}, {atom_used,1284164}, {binary,425232}, {code,12859877}, {ets,2454352}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5278}, {memory_data,{4040077312,4015497216,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 23872 kB\nBuffers: 62716 kB\nCached: 3530452 kB\nSwapCached: 0 kB\nActive: 310456 kB\nInactive: 3443288 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 23872 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 92 kB\nWriteback: 0 kB\nAnonPages: 160584 kB\nMapped: 24872 kB\nSlab: 134328 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580344 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615182848}, {buffered_memory,64221184}, {free_memory,24444928}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5273484,0}}, {context_switches,{1548745,0}}, {garbage_collection,{821827,1147910091,0}}, {io,{{input,26242112},{output,59421737}}}, {reductions,{328584907,506994}}, {run_queue,0}, {runtime,{67240,130}}]}]}] [stats:error] [2012-03-26 2:30:59] 
[ns_1@127.0.0.1:<0.22364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:00] [ns_1@127.0.0.1:<0.22284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22359.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22395.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:31:00] [ns_1@127.0.0.1:<0.22386.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:00] [ns_1@127.0.0.1:<0.22381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:01] [ns_1@127.0.0.1:<0.22314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:01] [ns_1@127.0.0.1:<0.22386.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:01] [ns_1@127.0.0.1:<0.22377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:02] [ns_1@127.0.0.1:<0.22297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:02] [ns_1@127.0.0.1:<0.22386.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:02] [ns_1@127.0.0.1:<0.22399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:03] [ns_1@127.0.0.1:<0.22336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:31:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22386.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:31:03] 
[ns_1@127.0.0.1:<0.22392.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:04] [ns_1@127.0.0.1:<0.22310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:04] [ns_1@127.0.0.1:<0.22414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:05] [ns_1@127.0.0.1:<0.22349.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22395.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22428.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:31:05] [ns_1@127.0.0.1:<0.22408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:06] [ns_1@127.0.0.1:<0.22325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:06] [ns_1@127.0.0.1:<0.22425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:07] [ns_1@127.0.0.1:<0.22362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:08] [ns_1@127.0.0.1:<0.22421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:08] [ns_1@127.0.0.1:<0.22342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:09] [ns_1@127.0.0.1:<0.22441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:09] [ns_1@127.0.0.1:<0.22375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:09] [ns_1@127.0.0.1:<0.22458.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:10] [ns_1@127.0.0.1:<0.22434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:10] [ns_1@127.0.0.1:<0.22354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22428.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:10] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22466.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:31:10] [ns_1@127.0.0.1:<0.22458.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:11] [ns_1@127.0.0.1:<0.22452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:11] [ns_1@127.0.0.1:<0.22390.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:11] [ns_1@127.0.0.1:<0.22458.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:12] [ns_1@127.0.0.1:<0.22447.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:12] [ns_1@127.0.0.1:<0.22369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:12] [ns_1@127.0.0.1:<0.22458.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:13] [ns_1@127.0.0.1:<0.22470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:13] [ns_1@127.0.0.1:<0.22406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:31:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22458.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:31:14] [ns_1@127.0.0.1:<0.22463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:14] [ns_1@127.0.0.1:<0.22379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:15] [ns_1@127.0.0.1:<0.22485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:15] [ns_1@127.0.0.1:<0.22419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22466.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22500.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:31:16] [ns_1@127.0.0.1:<0.22480.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:16] [ns_1@127.0.0.1:<0.22397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:17] [ns_1@127.0.0.1:<0.22497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:17] [ns_1@127.0.0.1:<0.22432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:18] [ns_1@127.0.0.1:<0.22492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:18] [ns_1@127.0.0.1:<0.22412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:19] [ns_1@127.0.0.1:<0.22511.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:19] [ns_1@127.0.0.1:<0.22445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:19] [ns_1@127.0.0.1:<0.22526.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:20] [ns_1@127.0.0.1:<0.22505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:20] [ns_1@127.0.0.1:<0.22423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22500.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22534.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:31:20] [ns_1@127.0.0.1:<0.22526.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:21] [ns_1@127.0.0.1:<0.22522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:21] [ns_1@127.0.0.1:<0.22461.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:21] 
[ns_1@127.0.0.1:<0.22526.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:22] [ns_1@127.0.0.1:<0.22518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:22] [ns_1@127.0.0.1:<0.22439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:22] [ns_1@127.0.0.1:<0.22526.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:23] [ns_1@127.0.0.1:<0.22539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:23] [ns_1@127.0.0.1:<0.22478.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:31:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22526.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:31:24] [ns_1@127.0.0.1:<0.22531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:24] [ns_1@127.0.0.1:<0.22450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:25] [ns_1@127.0.0.1:<0.22554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22534.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22567.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:31:25] [ns_1@127.0.0.1:<0.22490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:26] [ns_1@127.0.0.1:<0.22549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:26] 
[ns_1@127.0.0.1:<0.22468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:27] [ns_1@127.0.0.1:<0.22568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:27] [ns_1@127.0.0.1:<0.22503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:28] [ns_1@127.0.0.1:<0.22562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:28] [ns_1@127.0.0.1:<0.22483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:29] [ns_1@127.0.0.1:<0.22583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:29] [ns_1@127.0.0.1:<0.22593.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:29] [ns_1@127.0.0.1:<0.22516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:30] [ns_1@127.0.0.1:<0.22576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22567.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22602.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:31:30] [ns_1@127.0.0.1:<0.22593.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:30] [ns_1@127.0.0.1:<0.22495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:31] [ns_1@127.0.0.1:<0.22596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:31] [ns_1@127.0.0.1:<0.22593.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:31] [ns_1@127.0.0.1:<0.22529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:32] [ns_1@127.0.0.1:<0.22587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:32] [ns_1@127.0.0.1:<0.22593.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:32] [ns_1@127.0.0.1:<0.22509.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:33] [ns_1@127.0.0.1:<0.22613.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:31:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22593.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:31:33] [ns_1@127.0.0.1:<0.22547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:34] [ns_1@127.0.0.1:<0.22604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:34] [ns_1@127.0.0.1:<0.22520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:35] [ns_1@127.0.0.1:<0.22626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22602.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22635.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:31:35] [ns_1@127.0.0.1:<0.22560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:36] [ns_1@127.0.0.1:<0.22619.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:36] [ns_1@127.0.0.1:<0.22537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:37] [ns_1@127.0.0.1:<0.22639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:37] [ns_1@127.0.0.1:<0.22572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:38] [ns_1@127.0.0.1:<0.22630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:38] [ns_1@127.0.0.1:<0.22552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:39] [ns_1@127.0.0.1:<0.22652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:31:39] [ns_1@127.0.0.1:<0.22663.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:39] [ns_1@127.0.0.1:<0.22585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:40] [ns_1@127.0.0.1:<0.22646.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22635.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22671.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:31:40] [ns_1@127.0.0.1:<0.22663.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:40] [ns_1@127.0.0.1:<0.22564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:41] [ns_1@127.0.0.1:<0.22666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:41] [ns_1@127.0.0.1:<0.22663.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:41] [ns_1@127.0.0.1:<0.22599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:42] [ns_1@127.0.0.1:<0.22657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:42] [ns_1@127.0.0.1:<0.22673.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:42] [ns_1@127.0.0.1:<0.22579.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:42] [ns_1@127.0.0.1:<0.22663.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:43] [ns_1@127.0.0.1:<0.22589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:43] [ns_1@127.0.0.1:<0.22683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:31:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22663.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:31:44] [ns_1@127.0.0.1:<0.22615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:44] [ns_1@127.0.0.1:<0.22692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:45] [ns_1@127.0.0.1:<0.22606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:45] [ns_1@127.0.0.1:<0.22699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22671.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22709.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:31:46] [ns_1@127.0.0.1:<0.22628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:46] [ns_1@127.0.0.1:<0.22704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:47] [ns_1@127.0.0.1:<0.22621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:47] [ns_1@127.0.0.1:<0.22712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:48] [ns_1@127.0.0.1:<0.22641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:48] [ns_1@127.0.0.1:<0.22718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:49] [ns_1@127.0.0.1:<0.22632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:49] [ns_1@127.0.0.1:<0.22725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:49] [ns_1@127.0.0.1:<0.22735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:50] [ns_1@127.0.0.1:<0.22654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:50] [ns_1@127.0.0.1:<0.22729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22709.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22743.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:31:50] [ns_1@127.0.0.1:<0.22735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:51] [ns_1@127.0.0.1:<0.22648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:51] [ns_1@127.0.0.1:<0.22738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:51] [ns_1@127.0.0.1:<0.22735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:52] [ns_1@127.0.0.1:<0.22668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:52] [ns_1@127.0.0.1:<0.22746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:52] [ns_1@127.0.0.1:<0.22735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:53] [ns_1@127.0.0.1:<0.22659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:53] [ns_1@127.0.0.1:<0.22756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:31:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22735.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:31:54] [ns_1@127.0.0.1:<0.22685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:54] [ns_1@127.0.0.1:<0.22761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:55] [ns_1@127.0.0.1:<0.22675.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:55] [ns_1@127.0.0.1:<0.22769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:31:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22743.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:31:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22778.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:31:56] [ns_1@127.0.0.1:<0.22687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:56] [ns_1@127.0.0.1:<0.22773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:57] [ns_1@127.0.0.1:<0.22694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:57] [ns_1@127.0.0.1:<0.22781.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:58] [ns_1@127.0.0.1:<0.22689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:58] [ns_1@127.0.0.1:<0.22788.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:59] [ns_1@127.0.0.1:<0.22706.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:31:59] [ns_1@127.0.0.1:<0.22794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:31:59] [ns_1@127.0.0.1:<0.22805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:31:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754316,773319}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38231088}, {processes,10117640}, {processes_used,8492160}, {system,28113448}, {atom,1306681}, {atom_used,1284164}, {binary,425672}, {code,12859877}, {ets,2425496}]}, {system_stats, [{cpu_utilization_rate,25.6857855361596}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5338}, {memory_data,{4040077312,4015632384,{<0.300.0>,601176}}}, {disk_data, 
[{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 23756 kB\nBuffers: 62764 kB\nCached: 3530612 kB\nSwapCached: 0 kB\nActive: 310572 kB\nInactive: 3443376 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 23756 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 96 kB\nWriteback: 0 kB\nAnonPages: 160592 kB\nMapped: 24872 kB\nSlab: 134340 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580344 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3615346688}, {buffered_memory,64270336}, {free_memory,24326144}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5333510,0}}, {context_switches,{1561818,0}}, {garbage_collection,{829212,1159175616,0}}, {io,{{input,26272710},{output,59866253}}}, {reductions,{331218957,654914}}, {run_queue,0}, {runtime,{67850,170}}]}]}] [stats:error] [2012-03-26 2:32:00] [ns_1@127.0.0.1:<0.22701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22778.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22812.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:32:00] [ns_1@127.0.0.1:<0.22798.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:00] [ns_1@127.0.0.1:<0.22805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:01] [ns_1@127.0.0.1:<0.22720.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:01] [ns_1@127.0.0.1:<0.22805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:01] [ns_1@127.0.0.1:<0.22809.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:02] [ns_1@127.0.0.1:<0.22714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:02] [ns_1@127.0.0.1:<0.22805.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:02] [ns_1@127.0.0.1:<0.22816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:03] [ns_1@127.0.0.1:<0.22731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:32:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:32:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22805.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:32:03] [ns_1@127.0.0.1:<0.22825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:04] [ns_1@127.0.0.1:<0.22727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:04] [ns_1@127.0.0.1:<0.22831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:05] [ns_1@127.0.0.1:<0.22748.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22812.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22845.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:32:05] [ns_1@127.0.0.1:<0.22838.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:06] [ns_1@127.0.0.1:<0.22740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:06] [ns_1@127.0.0.1:<0.22842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:07] [ns_1@127.0.0.1:<0.22763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:07] [ns_1@127.0.0.1:<0.22851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:08] [ns_1@127.0.0.1:<0.22758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:08] [ns_1@127.0.0.1:<0.22858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:32:09] [ns_1@127.0.0.1:<0.22775.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:09] [ns_1@127.0.0.1:<0.22875.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:09] [ns_1@127.0.0.1:<0.22864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:10] [ns_1@127.0.0.1:<0.22771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22845.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22883.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:32:10] [ns_1@127.0.0.1:<0.22875.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:10] [ns_1@127.0.0.1:<0.22869.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:11] [ns_1@127.0.0.1:<0.22790.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:11] [ns_1@127.0.0.1:<0.22875.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:11] [ns_1@127.0.0.1:<0.22880.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:12] [ns_1@127.0.0.1:<0.22783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:12] [ns_1@127.0.0.1:<0.22875.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:12] [ns_1@127.0.0.1:<0.22887.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:13] [ns_1@127.0.0.1:<0.22801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:32:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22875.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:32:13] [ns_1@127.0.0.1:<0.22897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:14] [ns_1@127.0.0.1:<0.22796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:14] [ns_1@127.0.0.1:<0.22902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:15] [ns_1@127.0.0.1:<0.22823.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22883.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22917.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:32:15] [ns_1@127.0.0.1:<0.22909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:16] [ns_1@127.0.0.1:<0.22813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:17] [ns_1@127.0.0.1:<0.22914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:17] [ns_1@127.0.0.1:<0.22836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:18] [ns_1@127.0.0.1:<0.22922.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:18] [ns_1@127.0.0.1:<0.22828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:19] [ns_1@127.0.0.1:<0.22928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:19] [ns_1@127.0.0.1:<0.22849.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:19] [ns_1@127.0.0.1:<0.22943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:20] [ns_1@127.0.0.1:<0.22935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:20] [ns_1@127.0.0.1:<0.22840.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22917.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22951.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:32:20] [ns_1@127.0.0.1:<0.22943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:21] [ns_1@127.0.0.1:<0.22939.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:21] [ns_1@127.0.0.1:<0.22862.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:21] [ns_1@127.0.0.1:<0.22943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:22] [ns_1@127.0.0.1:<0.22948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:22] [ns_1@127.0.0.1:<0.22856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:22] [ns_1@127.0.0.1:<0.22943.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:23] [ns_1@127.0.0.1:<0.22956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:23] [ns_1@127.0.0.1:<0.22878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:32:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.22943.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:32:24] [ns_1@127.0.0.1:<0.22966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:24] [ns_1@127.0.0.1:<0.22867.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:25] [ns_1@127.0.0.1:<0.22971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:25] [ns_1@127.0.0.1:<0.22895.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[error_logger:error] [2012-03-26 2:32:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22951.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.22986.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:32:26] [ns_1@127.0.0.1:<0.22979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:26] [ns_1@127.0.0.1:<0.22885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:27] [ns_1@127.0.0.1:<0.22983.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:27] [ns_1@127.0.0.1:<0.22907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:28] [ns_1@127.0.0.1:<0.22991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:28] [ns_1@127.0.0.1:<0.22900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:29] [ns_1@127.0.0.1:<0.22998.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:29] [ns_1@127.0.0.1:<0.22920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:29] [ns_1@127.0.0.1:<0.23012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:30] [ns_1@127.0.0.1:<0.23004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:30] [ns_1@127.0.0.1:<0.22912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.22986.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23021.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:32:30] [ns_1@127.0.0.1:<0.23012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:31] [ns_1@127.0.0.1:<0.23008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:31] [ns_1@127.0.0.1:<0.22933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:31] [ns_1@127.0.0.1:<0.23012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:32] [ns_1@127.0.0.1:<0.23018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:32] [ns_1@127.0.0.1:<0.22926.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:32] [ns_1@127.0.0.1:<0.23012.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:33] [ns_1@127.0.0.1:<0.23025.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:33] [ns_1@127.0.0.1:<0.22946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:32:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23012.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:32:34] [ns_1@127.0.0.1:<0.23034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:34] [ns_1@127.0.0.1:<0.22937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:35] [ns_1@127.0.0.1:<0.23040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23021.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23052.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:32:35] [ns_1@127.0.0.1:<0.22964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:32:36] [ns_1@127.0.0.1:<0.23047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:36] [ns_1@127.0.0.1:<0.22954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:37] [ns_1@127.0.0.1:<0.23053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:37] [ns_1@127.0.0.1:<0.22977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:38] [ns_1@127.0.0.1:<0.23060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:38] [ns_1@127.0.0.1:<0.22969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:39] [ns_1@127.0.0.1:<0.23069.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:39] [ns_1@127.0.0.1:<0.23080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:39] [ns_1@127.0.0.1:<0.22989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:40] [ns_1@127.0.0.1:<0.23073.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23052.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23088.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:32:40] [ns_1@127.0.0.1:<0.23080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:40] [ns_1@127.0.0.1:<0.22981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:41] [ns_1@127.0.0.1:<0.23083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:41] [ns_1@127.0.0.1:<0.23080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:41] [ns_1@127.0.0.1:<0.23002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:42] [ns_1@127.0.0.1:<0.23090.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:42] [ns_1@127.0.0.1:<0.23080.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:42] [ns_1@127.0.0.1:<0.22996.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:42] [ns_1@127.0.0.1:<0.23006.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:42] [ns_1@127.0.0.1:<0.23023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:43] [ns_1@127.0.0.1:<0.23100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:32:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23080.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:32:43] [ns_1@127.0.0.1:<0.23016.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:44] [ns_1@127.0.0.1:<0.23105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:44] [ns_1@127.0.0.1:<0.23038.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:45] [ns_1@127.0.0.1:<0.23116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23088.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23126.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:32:45] [ns_1@127.0.0.1:<0.23032.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:46] [ns_1@127.0.0.1:<0.23107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:46] [ns_1@127.0.0.1:<0.23049.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:47] [ns_1@127.0.0.1:<0.23129.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:47] [ns_1@127.0.0.1:<0.23045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:48] [ns_1@127.0.0.1:<0.23109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:48] [ns_1@127.0.0.1:<0.23065.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:49] [ns_1@127.0.0.1:<0.23142.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:49] [ns_1@127.0.0.1:<0.23152.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:49] [ns_1@127.0.0.1:<0.23058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:50] [ns_1@127.0.0.1:<0.23121.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23126.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23160.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:32:50] [ns_1@127.0.0.1:<0.23152.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:51] [ns_1@127.0.0.1:<0.23076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:51] [ns_1@127.0.0.1:<0.23155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:51] [ns_1@127.0.0.1:<0.23152.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:52] [ns_1@127.0.0.1:<0.23071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:52] [ns_1@127.0.0.1:<0.23135.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:52] [ns_1@127.0.0.1:<0.23152.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:53] [ns_1@127.0.0.1:<0.23092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:53] [ns_1@127.0.0.1:<0.23173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:32:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23152.1> registered_name: [] 
exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:32:54] [ns_1@127.0.0.1:<0.23085.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:54] [ns_1@127.0.0.1:<0.23146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:55] [ns_1@127.0.0.1:<0.23111.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:55] [ns_1@127.0.0.1:<0.23186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:32:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23160.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:32:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23195.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:32:56] [ns_1@127.0.0.1:<0.23102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:56] [ns_1@127.0.0.1:<0.23163.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:57] [ns_1@127.0.0.1:<0.23123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:57] [ns_1@127.0.0.1:<0.23198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:58] [ns_1@127.0.0.1:<0.23118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:58] [ns_1@127.0.0.1:<0.23178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:59] [ns_1@127.0.0.1:<0.23137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:32:59] [ns_1@127.0.0.1:<0.23211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:32:59] [ns_1@127.0.0.1:<0.23227.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:32:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754376,800344}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, 
{incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38372616}, {processes,10227544}, {processes_used,8602064}, {system,28145072}, {atom,1306681}, {atom_used,1284164}, {binary,420968}, {code,12859877}, {ets,2455392}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5398}, {memory_data,{4040077312,4015759360,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26428 kB\nBuffers: 62820 kB\nCached: 3527712 kB\nSwapCached: 0 kB\nActive: 310900 kB\nInactive: 3440556 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26428 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 92 kB\nWriteback: 0 kB\nAnonPages: 160896 kB\nMapped: 24872 kB\nSlab: 134352 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580344 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612377088}, {buffered_memory,64327680}, {free_memory,27062272}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5393536,0}}, {context_switches,{1574894,0}}, {garbage_collection,{836375,1170373879,0}}, {io,{{input,26303396},{output,60307187}}}, {reductions,{333812454,627310}}, {run_queue,0}, {runtime,{68460,160}}]}]}] [stats:error] [2012-03-26 2:33:00] [ns_1@127.0.0.1:<0.23131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:00] [ns_1@127.0.0.1:<0.23190.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23195.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23236.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:33:00] [ns_1@127.0.0.1:<0.23227.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:33:01] [ns_1@127.0.0.1:<0.23148.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:01] [ns_1@127.0.0.1:<0.23231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:01] [ns_1@127.0.0.1:<0.23227.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:02] [ns_1@127.0.0.1:<0.23144.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:02] [ns_1@127.0.0.1:<0.23205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:02] [ns_1@127.0.0.1:<0.23227.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:03] [ns_1@127.0.0.1:<0.23165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:03] [ns_1@127.0.0.1:<0.23255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:33:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23227.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:33:04] [ns_1@127.0.0.1:<0.23157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:04] [ns_1@127.0.0.1:<0.23215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:05] [ns_1@127.0.0.1:<0.23180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:05] [ns_1@127.0.0.1:<0.23268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23236.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23277.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:06] [ns_1@127.0.0.1:<0.23175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:06] [ns_1@127.0.0.1:<0.23238.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:07] [ns_1@127.0.0.1:<0.23192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:07] [ns_1@127.0.0.1:<0.23281.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:08] [ns_1@127.0.0.1:<0.23188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:08] [ns_1@127.0.0.1:<0.23261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:09] [ns_1@127.0.0.1:<0.23207.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:09] [ns_1@127.0.0.1:<0.23305.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:09] [ns_1@127.0.0.1:<0.23294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:10] [ns_1@127.0.0.1:<0.23200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23277.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23313.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:33:10] [ns_1@127.0.0.1:<0.23305.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:10] [ns_1@127.0.0.1:<0.23272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:11] [ns_1@127.0.0.1:<0.23222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:11] [ns_1@127.0.0.1:<0.23305.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:11] [ns_1@127.0.0.1:<0.23310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:12] [ns_1@127.0.0.1:<0.23213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:12] [ns_1@127.0.0.1:<0.23305.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:12] [ns_1@127.0.0.1:<0.23288.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:13] [ns_1@127.0.0.1:<0.23240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:33:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23305.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:33:13] [ns_1@127.0.0.1:<0.23327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:14] [ns_1@127.0.0.1:<0.23233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:14] [ns_1@127.0.0.1:<0.23299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:15] [ns_1@127.0.0.1:<0.23263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23313.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23348.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:15] [ns_1@127.0.0.1:<0.23339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:16] [ns_1@127.0.0.1:<0.23257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:16] [ns_1@127.0.0.1:<0.23317.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:17] [ns_1@127.0.0.1:<0.23274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:17] [ns_1@127.0.0.1:<0.23352.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:18] [ns_1@127.0.0.1:<0.23270.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:18] [ns_1@127.0.0.1:<0.23332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:19] [ns_1@127.0.0.1:<0.23290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:19] [ns_1@127.0.0.1:<0.23373.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:19] [ns_1@127.0.0.1:<0.23365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:20] [ns_1@127.0.0.1:<0.23283.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23348.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23381.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:33:20] [ns_1@127.0.0.1:<0.23373.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:20] [ns_1@127.0.0.1:<0.23344.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:21] [ns_1@127.0.0.1:<0.23308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:21] [ns_1@127.0.0.1:<0.23373.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:21] [ns_1@127.0.0.1:<0.23378.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:22] [ns_1@127.0.0.1:<0.23296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:22] [ns_1@127.0.0.1:<0.23373.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:22] [ns_1@127.0.0.1:<0.23358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:23] [ns_1@127.0.0.1:<0.23324.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:33:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23373.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in 
call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:33:24] [ns_1@127.0.0.1:<0.23396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:24] [ns_1@127.0.0.1:<0.23315.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:25] [ns_1@127.0.0.1:<0.23369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:25] [ns_1@127.0.0.1:<0.23337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23381.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23418.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:26] [ns_1@127.0.0.1:<0.23409.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:26] [ns_1@127.0.0.1:<0.23330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:27] [ns_1@127.0.0.1:<0.23386.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:27] [ns_1@127.0.0.1:<0.23350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:28] [ns_1@127.0.0.1:<0.23421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:28] [ns_1@127.0.0.1:<0.23342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:29] [ns_1@127.0.0.1:<0.23401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:29] [ns_1@127.0.0.1:<0.23363.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:29] [ns_1@127.0.0.1:<0.23442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:30] [ns_1@127.0.0.1:<0.23434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:30] [ns_1@127.0.0.1:<0.23356.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:30] [ns_1@127.0.0.1:<0.23442.1>:ns_janitor:wait_for_memcached:278] 
Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23418.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23452.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:31] [ns_1@127.0.0.1:<0.23413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:31] [ns_1@127.0.0.1:<0.23376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:31] [ns_1@127.0.0.1:<0.23442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:32] [ns_1@127.0.0.1:<0.23448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:32] [ns_1@127.0.0.1:<0.23367.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:32] [ns_1@127.0.0.1:<0.23442.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:33] [ns_1@127.0.0.1:<0.23428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:33] [ns_1@127.0.0.1:<0.23394.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:33:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23442.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:33:34] [ns_1@127.0.0.1:<0.23464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:34] [ns_1@127.0.0.1:<0.23384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:35] [ns_1@127.0.0.1:<0.23438.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:35] [ns_1@127.0.0.1:<0.23407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23452.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23486.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:36] [ns_1@127.0.0.1:<0.23477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:36] [ns_1@127.0.0.1:<0.23399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:37] [ns_1@127.0.0.1:<0.23455.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:37] [ns_1@127.0.0.1:<0.23419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:38] [ns_1@127.0.0.1:<0.23490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:38] [ns_1@127.0.0.1:<0.23411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:39] [ns_1@127.0.0.1:<0.23470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:39] [ns_1@127.0.0.1:<0.23432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:39] [ns_1@127.0.0.1:<0.23512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:40] [ns_1@127.0.0.1:<0.23503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:40] [ns_1@127.0.0.1:<0.23426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:40] [ns_1@127.0.0.1:<0.23512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23486.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23521.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:41] [ns_1@127.0.0.1:<0.23481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:41] [ns_1@127.0.0.1:<0.23512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:41] [ns_1@127.0.0.1:<0.23446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:42] [ns_1@127.0.0.1:<0.23517.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:42] [ns_1@127.0.0.1:<0.23512.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:42] [ns_1@127.0.0.1:<0.23436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:33:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23512.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:33:44] [ns_1@127.0.0.1:<0.23462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:44] [ns_1@127.0.0.1:<0.23475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:44] [ns_1@127.0.0.1:<0.23488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:44] [ns_1@127.0.0.1:<0.23535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:45] [ns_1@127.0.0.1:<0.23453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:45] [ns_1@127.0.0.1:<0.23497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23521.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,menelaus_sup} started: [{pid,<0.23556.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:46] [ns_1@127.0.0.1:<0.23501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:46] [ns_1@127.0.0.1:<0.23549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:47] [ns_1@127.0.0.1:<0.23468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:47] [ns_1@127.0.0.1:<0.23508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:48] [ns_1@127.0.0.1:<0.23515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:48] [ns_1@127.0.0.1:<0.23563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:49] [ns_1@127.0.0.1:<0.23479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:49] [ns_1@127.0.0.1:<0.23526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:49] [ns_1@127.0.0.1:<0.23580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:50] [ns_1@127.0.0.1:<0.23532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:50] [ns_1@127.0.0.1:<0.23574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:50] [ns_1@127.0.0.1:<0.23580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23556.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23589.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:51] [ns_1@127.0.0.1:<0.23495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:51] [ns_1@127.0.0.1:<0.23540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:51] [ns_1@127.0.0.1:<0.23580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:52] [ns_1@127.0.0.1:<0.23546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:52] [ns_1@127.0.0.1:<0.23591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:52] 
[ns_1@127.0.0.1:<0.23580.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:53] [ns_1@127.0.0.1:<0.23506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:53] [ns_1@127.0.0.1:<0.23542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:33:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23580.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:33:54] [ns_1@127.0.0.1:<0.23559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:54] [ns_1@127.0.0.1:<0.23606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:55] [ns_1@127.0.0.1:<0.23522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:55] [ns_1@127.0.0.1:<0.23544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:33:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23589.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:33:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23625.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:33:56] [ns_1@127.0.0.1:<0.23572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:56] [ns_1@127.0.0.1:<0.23618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:57] [ns_1@127.0.0.1:<0.23551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:57] [ns_1@127.0.0.1:<0.23557.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:58] 
[ns_1@127.0.0.1:<0.23585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:58] [ns_1@127.0.0.1:<0.23633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:33:59] [ns_1@127.0.0.1:<0.23565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:33:59] [ns_1@127.0.0.1:<0.23648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:33:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754436,850371}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38272072}, {processes,10141336}, {processes_used,8515856}, {system,28130736}, {atom,1306681}, {atom_used,1284164}, {binary,426760}, {code,12859877}, {ets,2429192}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5458}, {memory_data,{4040077312,4013023232,{<0.18771.0>,604872}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26180 kB\nBuffers: 62912 kB\nCached: 3527372 kB\nSwapCached: 0 kB\nActive: 310652 kB\nInactive: 3440264 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26180 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 20 kB\nWriteback: 12 kB\nAnonPages: 160640 kB\nMapped: 24872 kB\nSlab: 134360 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580596 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612028928}, {buffered_memory,64421888}, {free_memory,26808320}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5453586,1}}, {context_switches,{1588500,0}}, {garbage_collection,{843508,1181717405,0}}, {io,{{input,26579728},{output,61307970}}}, {reductions,{336755418,589187}}, {run_queue,0}, {runtime,{69280,190}}]}]}] [stats:error] [2012-03-26 2:33:59] [ns_1@127.0.0.1:<0.23570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:00] [ns_1@127.0.0.1:<0.23603.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:00] [ns_1@127.0.0.1:<0.23648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:00] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23625.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23658.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:00] [ns_1@127.0.0.1:<0.23643.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:01] [ns_1@127.0.0.1:<0.23576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:01] [ns_1@127.0.0.1:<0.23648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:01] [ns_1@127.0.0.1:<0.23583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:02] [ns_1@127.0.0.1:<0.23616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:02] [ns_1@127.0.0.1:<0.23648.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:02] [ns_1@127.0.0.1:<0.23661.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:03] [ns_1@127.0.0.1:<0.23593.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:34:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23648.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:34:03] [ns_1@127.0.0.1:<0.23601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:04] [ns_1@127.0.0.1:<0.23628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:04] [ns_1@127.0.0.1:<0.23676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:05] 
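Each child_terminated report for hot_keys_keeper is immediately followed by a PROGRESS REPORT because the child is declared permanent under menelaus_sup, so the supervisor restarts it after every exit. The real menelaus_sup source is not part of this log; the sketch below is only a reconstruction of the logged {name,...}/{mfargs,...}/{restart_type,...}/{shutdown,...}/{child_type,...} fields in the old-style 6-tuple child spec format:

    %% Illustrative only -- reconstructed from the supervisor report fields,
    %% not taken from the actual menelaus_sup module.
    {hot_keys_keeper,                      %% child id      ({name,hot_keys_keeper})
     {hot_keys_keeper, start_link, []},    %% start MFA     ({mfargs,...})
     permanent,                            %% restart type  ({restart_type,permanent})
     5000,                                 %% shutdown, ms  ({shutdown,5000})
     worker,                               %% child type    ({child_type,worker})
     [hot_keys_keeper]}.

Because the crashes stay within menelaus_sup's restart intensity, the supervisor keeps restarting the child rather than shutting itself down, which produces the alternating SUPERVISOR REPORT / PROGRESS REPORT pairs at the roughly five-second spacing visible above.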
[ns_1@127.0.0.1:<0.23608.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23658.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23692.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:05] [ns_1@127.0.0.1:<0.23614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:06] [ns_1@127.0.0.1:<0.23641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:06] [ns_1@127.0.0.1:<0.23687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:07] [ns_1@127.0.0.1:<0.23620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:07] [ns_1@127.0.0.1:<0.23626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:08] [ns_1@127.0.0.1:<0.23659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:08] [ns_1@127.0.0.1:<0.23703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:09] [ns_1@127.0.0.1:<0.23635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:09] [ns_1@127.0.0.1:<0.23720.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:09] [ns_1@127.0.0.1:<0.23639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:10] [ns_1@127.0.0.1:<0.23674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:10] [ns_1@127.0.0.1:<0.23720.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23692.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23729.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:34:10] [ns_1@127.0.0.1:<0.23714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:11] [ns_1@127.0.0.1:<0.23651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:11] [ns_1@127.0.0.1:<0.23720.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:11] [ns_1@127.0.0.1:<0.23654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:12] [ns_1@127.0.0.1:<0.23685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:12] [ns_1@127.0.0.1:<0.23720.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:13] [ns_1@127.0.0.1:<0.23732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:13] [ns_1@127.0.0.1:<0.23666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:34:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23720.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:34:14] [ns_1@127.0.0.1:<0.23670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:14] [ns_1@127.0.0.1:<0.23701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:15] [ns_1@127.0.0.1:<0.23747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:15] [ns_1@127.0.0.1:<0.23681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23729.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23764.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:16] [ns_1@127.0.0.1:<0.23683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:16] [ns_1@127.0.0.1:<0.23712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:17] [ns_1@127.0.0.1:<0.23759.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:17] [ns_1@127.0.0.1:<0.23694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:18] [ns_1@127.0.0.1:<0.23696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:18] [ns_1@127.0.0.1:<0.23730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:19] [ns_1@127.0.0.1:<0.23773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:19] [ns_1@127.0.0.1:<0.23707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:19] [ns_1@127.0.0.1:<0.23788.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:20] [ns_1@127.0.0.1:<0.23709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:20] [ns_1@127.0.0.1:<0.23745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:20] [ns_1@127.0.0.1:<0.23788.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23764.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23797.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:21] [ns_1@127.0.0.1:<0.23784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:21] [ns_1@127.0.0.1:<0.23723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:21] [ns_1@127.0.0.1:<0.23788.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:22] [ns_1@127.0.0.1:<0.23725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:22] [ns_1@127.0.0.1:<0.23757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:22] [ns_1@127.0.0.1:<0.23788.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:23] [ns_1@127.0.0.1:<0.23801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:23] [ns_1@127.0.0.1:<0.23740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:34:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23788.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:34:24] [ns_1@127.0.0.1:<0.23742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:24] [ns_1@127.0.0.1:<0.23771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:25] [ns_1@127.0.0.1:<0.23816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:25] [ns_1@127.0.0.1:<0.23752.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23797.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23833.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:26] [ns_1@127.0.0.1:<0.23754.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:26] [ns_1@127.0.0.1:<0.23782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:27] [ns_1@127.0.0.1:<0.23828.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:27] [ns_1@127.0.0.1:<0.23765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:28] [ns_1@127.0.0.1:<0.23767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:28] [ns_1@127.0.0.1:<0.23799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:29] [ns_1@127.0.0.1:<0.23843.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:29] [ns_1@127.0.0.1:<0.23778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:29] [ns_1@127.0.0.1:<0.23857.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:30] [ns_1@127.0.0.1:<0.23780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:30] [ns_1@127.0.0.1:<0.23857.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23833.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:31] [ns_1@127.0.0.1:<0.23814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 2:34:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23865.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:34:31] [ns_1@127.0.0.1:<0.23857.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:31] [ns_1@127.0.0.1:<0.23791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:32] [ns_1@127.0.0.1:<0.23793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:32] [ns_1@127.0.0.1:<0.23857.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:32] [ns_1@127.0.0.1:<0.23866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:33] [ns_1@127.0.0.1:<0.23853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:34:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23857.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:34:33] [ns_1@127.0.0.1:<0.23809.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:34] [ns_1@127.0.0.1:<0.23811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:34] [ns_1@127.0.0.1:<0.23826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:35] [ns_1@127.0.0.1:<0.23871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23865.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23897.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:35] [ns_1@127.0.0.1:<0.23822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:36] [ns_1@127.0.0.1:<0.23824.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:36] [ns_1@127.0.0.1:<0.23841.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:37] [ns_1@127.0.0.1:<0.23886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:37] [ns_1@127.0.0.1:<0.23834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:38] [ns_1@127.0.0.1:<0.23836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:38] [ns_1@127.0.0.1:<0.23851.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:39] [ns_1@127.0.0.1:<0.23899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:39] [ns_1@127.0.0.1:<0.23923.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:39] [ns_1@127.0.0.1:<0.23847.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:40] [ns_1@127.0.0.1:<0.23849.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:40] [ns_1@127.0.0.1:<0.23923.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:40] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23897.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23932.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:40] [ns_1@127.0.0.1:<0.23881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:41] [ns_1@127.0.0.1:<0.23912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:41] [ns_1@127.0.0.1:<0.23923.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:41] [ns_1@127.0.0.1:<0.23861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:42] [ns_1@127.0.0.1:<0.23878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:42] [ns_1@127.0.0.1:<0.23923.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:42] [ns_1@127.0.0.1:<0.23892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:43] [ns_1@127.0.0.1:<0.23926.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:34:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23923.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:34:43] [ns_1@127.0.0.1:<0.23875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:44] [ns_1@127.0.0.1:<0.23890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:44] [ns_1@127.0.0.1:<0.23906.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:44] 
[ns_1@127.0.0.1:<0.23917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:44] [ns_1@127.0.0.1:<0.23933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:44] [ns_1@127.0.0.1:<0.23948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:45] [ns_1@127.0.0.1:<0.23908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:45] [ns_1@127.0.0.1:<0.23943.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23932.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23975.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:46] [ns_1@127.0.0.1:<0.23888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:46] [ns_1@127.0.0.1:<0.23968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:47] [ns_1@127.0.0.1:<0.23919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:47] [ns_1@127.0.0.1:<0.23955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:48] [ns_1@127.0.0.1:<0.23901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:48] [ns_1@127.0.0.1:<0.23982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:49] [ns_1@127.0.0.1:<0.23935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:49] [ns_1@127.0.0.1:<0.23976.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:49] [ns_1@127.0.0.1:<0.23999.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:50] [ns_1@127.0.0.1:<0.23914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:50] [ns_1@127.0.0.1:<0.23993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:50] [ns_1@127.0.0.1:<0.23999.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23975.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24008.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:51] [ns_1@127.0.0.1:<0.23950.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:51] [ns_1@127.0.0.1:<0.23989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:51] [ns_1@127.0.0.1:<0.23999.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:52] [ns_1@127.0.0.1:<0.23928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:52] [ns_1@127.0.0.1:<0.24010.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:52] [ns_1@127.0.0.1:<0.23999.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:53] [ns_1@127.0.0.1:<0.23970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:53] [ns_1@127.0.0.1:<0.24002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:34:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23999.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:34:54] [ns_1@127.0.0.1:<0.23945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:54] [ns_1@127.0.0.1:<0.24025.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:55] [ns_1@127.0.0.1:<0.23984.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:34:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24008.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:34:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24042.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:34:57] [ns_1@127.0.0.1:<0.23995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:57] [ns_1@127.0.0.1:<0.24020.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:58] [ns_1@127.0.0.1:<0.23957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:58] [ns_1@127.0.0.1:<0.24037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:34:59] [ns_1@127.0.0.1:<0.24012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:34:59] [ns_1@127.0.0.1:<0.24061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:34:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754496,882451}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38171816}, {processes,10006808}, {processes_used,8381328}, {system,28165008}, {atom,1306681}, {atom_used,1284164}, {binary,425832}, {code,12859877}, {ets,2457832}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5518}, {memory_data,{4040077312,4013268992,{<0.18771.0>,609096}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26312 kB\nBuffers: 62976 kB\nCached: 3527520 kB\nSwapCached: 0 kB\nActive: 310792 kB\nInactive: 3440352 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26312 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160644 kB\nMapped: 24872 kB\nSlab: 134340 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580596 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612180480}, {buffered_memory,64487424}, {free_memory,26943488}, 
{total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5513619,1}}, {context_switches,{1601511,0}}, {garbage_collection,{850374,1192941654,0}}, {io,{{input,26610329},{output,61752951}}}, {reductions,{339304291,663747}}, {run_queue,0}, {runtime,{70010,170}}]}]}] [stats:error] [2012-03-26 2:34:59] [ns_1@127.0.0.1:<0.24033.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:00] [ns_1@127.0.0.1:<0.23959.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:00] [ns_1@127.0.0.1:<0.24061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24042.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24071.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:00] [ns_1@127.0.0.1:<0.24046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:01] [ns_1@127.0.0.1:<0.24027.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:01] [ns_1@127.0.0.1:<0.24061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:01] [ns_1@127.0.0.1:<0.24052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:02] [ns_1@127.0.0.1:<0.23961.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:02] [ns_1@127.0.0.1:<0.24061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:02] [ns_1@127.0.0.1:<0.24056.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:03] [ns_1@127.0.0.1:<0.24048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:35:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24061.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in 
call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:35:03] [ns_1@127.0.0.1:<0.24067.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:04] [ns_1@127.0.0.1:<0.23963.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:04] [ns_1@127.0.0.1:<0.24074.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:05] [ns_1@127.0.0.1:<0.24064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24071.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24105.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:05] [ns_1@127.0.0.1:<0.24083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:06] [ns_1@127.0.0.1:<0.23965.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:06] [ns_1@127.0.0.1:<0.24089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:07] [ns_1@127.0.0.1:<0.24079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:07] [ns_1@127.0.0.1:<0.24096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:08] [ns_1@127.0.0.1:<0.23978.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:08] [ns_1@127.0.0.1:<0.24100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:09] [ns_1@127.0.0.1:<0.24094.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:09] [ns_1@127.0.0.1:<0.24133.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:09] [ns_1@127.0.0.1:<0.24109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:10] [ns_1@127.0.0.1:<0.23991.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:10] [ns_1@127.0.0.1:<0.24133.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 
2:35:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24105.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24142.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:10] [ns_1@127.0.0.1:<0.24116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:11] [ns_1@127.0.0.1:<0.24107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:11] [ns_1@127.0.0.1:<0.24133.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:11] [ns_1@127.0.0.1:<0.24122.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:12] [ns_1@127.0.0.1:<0.24004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:12] [ns_1@127.0.0.1:<0.24133.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:13] [ns_1@127.0.0.1:<0.24127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:13] [ns_1@127.0.0.1:<0.24120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:35:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24133.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:35:14] [ns_1@127.0.0.1:<0.24138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:14] [ns_1@127.0.0.1:<0.24022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:15] [ns_1@127.0.0.1:<0.24145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:15] 
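Every failure in this stretch of the log reduces to the same precondition: no process is registered as 'ns_memcached-default' on ns_1@127.0.0.1, the per-bucket worker that the janitor and hot_keys_keeper calls above are addressed to. A generic check from an attached Erlang shell confirms that state; it uses only standard BIFs and stdlib calls, nothing Couchbase-specific, and the shown results are what this log implies rather than captured output:

    1> whereis('ns_memcached-default').
    undefined
    2> [N || N <- registered(), lists:prefix("ns_memcached", atom_to_list(N))].
    []

While whereis/1 keeps returning undefined, every gen_server:call/3 against that name keeps exiting with noproc, so the crash/restart cycle recorded here continues until the bucket's ns_memcached worker is running again.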
[ns_1@127.0.0.1:<0.24136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24142.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24177.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:16] [ns_1@127.0.0.1:<0.24155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:16] [ns_1@127.0.0.1:<0.24035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:17] [ns_1@127.0.0.1:<0.24160.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:17] [ns_1@127.0.0.1:<0.24153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:18] [ns_1@127.0.0.1:<0.24167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:18] [ns_1@127.0.0.1:<0.24054.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:19] [ns_1@127.0.0.1:<0.24172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:19] [ns_1@127.0.0.1:<0.24165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:19] [ns_1@127.0.0.1:<0.24201.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:20] [ns_1@127.0.0.1:<0.24180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:20] [ns_1@127.0.0.1:<0.24072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:20] [ns_1@127.0.0.1:<0.24201.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24177.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24210.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:35:21] [ns_1@127.0.0.1:<0.24186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:21] [ns_1@127.0.0.1:<0.24178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:21] [ns_1@127.0.0.1:<0.24201.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:22] [ns_1@127.0.0.1:<0.24193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:22] [ns_1@127.0.0.1:<0.24087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:22] [ns_1@127.0.0.1:<0.24201.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:23] [ns_1@127.0.0.1:<0.24197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:23] [ns_1@127.0.0.1:<0.24191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:35:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24201.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:35:24] [ns_1@127.0.0.1:<0.24206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:24] [ns_1@127.0.0.1:<0.24098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:25] [ns_1@127.0.0.1:<0.24214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:25] [ns_1@127.0.0.1:<0.24204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24210.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24246.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:26] [ns_1@127.0.0.1:<0.24224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:26] [ns_1@127.0.0.1:<0.24114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:27] [ns_1@127.0.0.1:<0.24229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:27] [ns_1@127.0.0.1:<0.24222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:28] [ns_1@127.0.0.1:<0.24237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:28] [ns_1@127.0.0.1:<0.24125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:29] [ns_1@127.0.0.1:<0.24241.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:29] [ns_1@127.0.0.1:<0.24235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:29] [ns_1@127.0.0.1:<0.24270.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:30] [ns_1@127.0.0.1:<0.24249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:30] [ns_1@127.0.0.1:<0.24143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:30] [ns_1@127.0.0.1:<0.24270.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24246.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24280.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:31] [ns_1@127.0.0.1:<0.24256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:31] [ns_1@127.0.0.1:<0.24270.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:31] [ns_1@127.0.0.1:<0.24247.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:32] [ns_1@127.0.0.1:<0.24262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:32] [ns_1@127.0.0.1:<0.24270.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:32] [ns_1@127.0.0.1:<0.24158.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:33] [ns_1@127.0.0.1:<0.24266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:35:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24270.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:35:33] [ns_1@127.0.0.1:<0.24260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:34] [ns_1@127.0.0.1:<0.24276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:34] [ns_1@127.0.0.1:<0.24170.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:35] [ns_1@127.0.0.1:<0.24286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24280.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24312.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:35] [ns_1@127.0.0.1:<0.24274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:36] [ns_1@127.0.0.1:<0.24293.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:36] [ns_1@127.0.0.1:<0.24184.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:37] [ns_1@127.0.0.1:<0.24301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:37] [ns_1@127.0.0.1:<0.24290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:38] [ns_1@127.0.0.1:<0.24305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:38] [ns_1@127.0.0.1:<0.24195.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:39] [ns_1@127.0.0.1:<0.24314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:39] [ns_1@127.0.0.1:<0.24338.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:39] [ns_1@127.0.0.1:<0.24303.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:40] [ns_1@127.0.0.1:<0.24318.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:40] [ns_1@127.0.0.1:<0.24338.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24312.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24347.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:40] [ns_1@127.0.0.1:<0.24212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:41] [ns_1@127.0.0.1:<0.24327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:41] [ns_1@127.0.0.1:<0.24338.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:41] [ns_1@127.0.0.1:<0.24316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:42] [ns_1@127.0.0.1:<0.24332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:42] [ns_1@127.0.0.1:<0.24338.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:42] [ns_1@127.0.0.1:<0.24227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:43] [ns_1@127.0.0.1:<0.24341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:35:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24338.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in 
call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours:
[stats:error] [2012-03-26 2:35:43] [ns_1@127.0.0.1:<0.24329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 2:35:44] [ns_1@127.0.0.1:<0.24348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 2:35:44] [ns_1@127.0.0.1:<0.24239.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 2:35:44] [ns_1@127.0.0.1:<0.24254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 2:35:45] [ns_1@127.0.0.1:<0.24296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[stats:error] [2012-03-26 2:35:45] [ns_1@127.0.0.1:<0.24323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 2:35:45] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}]
[ns_server:info] [2012-03-26 2:35:45] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw"
[ns_server:info] [2012-03-26 2:35:45] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 2:35:45] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 2:35:45] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:warn] [2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time.
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes []
[menelaus:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.24307.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default"
[menelaus:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.24363.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.24415.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}]
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw"
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}]
[stats:error] [2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.24375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done
[ns_server:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:35:51: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({
moxi<0.18858.0>: "name": "default",
moxi<0.18858.0>: "nodeLocator": "vbucket",
moxi<0.18858.0>: "saslPassword": "",
moxi<0.18858.0>: "nodes": [{
moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default",
moxi<0.18858.0>: "replication": 0,
moxi<0.18858.0>: "clusterMembership": "active",
moxi<0.18858.0>: "status": "warmup",
moxi<0.18858.0>: "thisNode": true,
moxi<0.18858.0>: "hostname": "127.0.0.1:8091",
moxi<0.18858.0>: "clusterCompatibility": 1,
moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise",
moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu",
moxi<0.18858.0>: "ports": {
moxi<0.18858.0>: "proxy": 11211,
moxi<0.18858.0>: "direct": 11210
moxi<0.18858.0>: }
moxi<0.18858.0>: }],
moxi<0.18858.0>: "vBucketServerMap": {
moxi<0.18858.0>: "hashAlgorithm": "CRC",
moxi<0.18858.0>: "numReplicas": 1,
moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"],
moxi<0.18858.0>: "vBucketMap": []
moxi<0.18858.0>: }
moxi<0.18858.0>: })
[stats:error]
[2012-03-26 2:35:50] [ns_1@127.0.0.1:<0.24377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24347.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24428.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:35:51] [ns_1@127.0.0.1:<0.24415.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:51] [ns_1@127.0.0.1:<0.24334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:51] [ns_1@127.0.0.1:<0.24358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:52] [ns_1@127.0.0.1:<0.24415.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:52] [ns_1@127.0.0.1:<0.24425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:52] [ns_1@127.0.0.1:<0.24379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:53] [ns_1@127.0.0.1:<0.24415.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:53] [ns_1@127.0.0.1:<0.24350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:53] [ns_1@127.0.0.1:<0.24370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:54] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:35:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24415.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:35:54] [ns_1@127.0.0.1:<0.24443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:54] 
[ns_1@127.0.0.1:<0.24382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:55] [ns_1@127.0.0.1:<0.24365.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:55] [ns_1@127.0.0.1:<0.24408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:35:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24428.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:35:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24465.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:35:56] [ns_1@127.0.0.1:<0.24456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:56] [ns_1@127.0.0.1:<0.24385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:57] [ns_1@127.0.0.1:<0.24433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:57] [ns_1@127.0.0.1:<0.24409.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:58] [ns_1@127.0.0.1:<0.24468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:58] [ns_1@127.0.0.1:<0.24387.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:59] [ns_1@127.0.0.1:<0.24448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:35:59] [ns_1@127.0.0.1:<0.24410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:35:59] [ns_1@127.0.0.1:<0.24503.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:35:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754556,911600}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38256992}, {processes,10081616}, {processes_used,8456136}, {system,28175376}, {atom,1306681}, {atom_used,1284164}, {binary,459880}, {code,12859877}, {ets,2427192}]}, {system_stats, [{cpu_utilization_rate,25.40106951871658}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, 
{ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5578}, {memory_data,{4040077312,4013514752,{<0.18771.0>,613320}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25792 kB\nBuffers: 63020 kB\nCached: 3527684 kB\nSwapCached: 0 kB\nActive: 311128 kB\nInactive: 3440384 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25792 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 100 kB\nWriteback: 0 kB\nAnonPages: 160688 kB\nMapped: 24872 kB\nSlab: 134460 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582796 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612348416}, {buffered_memory,64532480}, {free_memory,26411008}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5573648,1}}, {context_switches,{1614643,0}}, {garbage_collection,{857126,1204370292,0}}, {io,{{input,26644047},{output,62202827}}}, {reductions,{341848261,662658}}, {run_queue,0}, {runtime,{70800,230}}]}]}] [stats:error] [2012-03-26 2:36:00] [ns_1@127.0.0.1:<0.24481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:00] [ns_1@127.0.0.1:<0.24503.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24465.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24513.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:00] [ns_1@127.0.0.1:<0.24389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:01] [ns_1@127.0.0.1:<0.24460.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:01] [ns_1@127.0.0.1:<0.24503.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:01] [ns_1@127.0.0.1:<0.24411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:02] [ns_1@127.0.0.1:<0.24514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:02] [ns_1@127.0.0.1:<0.24503.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:02] 
[ns_1@127.0.0.1:<0.24431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:03] [ns_1@127.0.0.1:<0.24475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:36:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24503.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:36:03] [ns_1@127.0.0.1:<0.24418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:04] [ns_1@127.0.0.1:<0.24529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:04] [ns_1@127.0.0.1:<0.24446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:05] [ns_1@127.0.0.1:<0.24498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24513.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24547.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:05] [ns_1@127.0.0.1:<0.24441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:06] [ns_1@127.0.0.1:<0.24540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:06] [ns_1@127.0.0.1:<0.24458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:07] [ns_1@127.0.0.1:<0.24521.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:07] [ns_1@127.0.0.1:<0.24454.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:08] 
[ns_1@127.0.0.1:<0.24556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:08] [ns_1@127.0.0.1:<0.24473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:09] [ns_1@127.0.0.1:<0.24536.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:09] [ns_1@127.0.0.1:<0.24575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:09] [ns_1@127.0.0.1:<0.24466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:10] [ns_1@127.0.0.1:<0.24567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:10] [ns_1@127.0.0.1:<0.24575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24547.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24584.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:11] [ns_1@127.0.0.1:<0.24483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:11] [ns_1@127.0.0.1:<0.24549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:11] [ns_1@127.0.0.1:<0.24575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:12] [ns_1@127.0.0.1:<0.24479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:12] [ns_1@127.0.0.1:<0.24585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:12] [ns_1@127.0.0.1:<0.24575.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:13] [ns_1@127.0.0.1:<0.24516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:13] [ns_1@127.0.0.1:<0.24562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:36:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24575.1> registered_name: [] exception exit: {noproc, {gen_server,call, 
[{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:36:14] [ns_1@127.0.0.1:<0.24507.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:14] [ns_1@127.0.0.1:<0.24600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:15] [ns_1@127.0.0.1:<0.24531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:15] [ns_1@127.0.0.1:<0.24578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24584.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:16] [ns_1@127.0.0.1:<0.24525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:16] [ns_1@127.0.0.1:<0.24612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:17] [ns_1@127.0.0.1:<0.24542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:17] [ns_1@127.0.0.1:<0.24595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:18] [ns_1@127.0.0.1:<0.24538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:18] [ns_1@127.0.0.1:<0.24626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:19] [ns_1@127.0.0.1:<0.24558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:19] [ns_1@127.0.0.1:<0.24607.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:19] [ns_1@127.0.0.1:<0.24643.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:20] [ns_1@127.0.0.1:<0.24551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:20] [ns_1@127.0.0.1:<0.24637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:20] [ns_1@127.0.0.1:<0.24643.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24619.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24652.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:21] [ns_1@127.0.0.1:<0.24569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:21] [ns_1@127.0.0.1:<0.24620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:21] [ns_1@127.0.0.1:<0.24643.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:22] [ns_1@127.0.0.1:<0.24564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:22] [ns_1@127.0.0.1:<0.24654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:22] [ns_1@127.0.0.1:<0.24643.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:23] [ns_1@127.0.0.1:<0.24587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:23] [ns_1@127.0.0.1:<0.24633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:36:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24643.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:36:24] [ns_1@127.0.0.1:<0.24580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:24] [ns_1@127.0.0.1:<0.24669.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:36:25] [ns_1@127.0.0.1:<0.24602.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:25] [ns_1@127.0.0.1:<0.24646.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24652.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24688.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:26] [ns_1@127.0.0.1:<0.24597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:26] [ns_1@127.0.0.1:<0.24681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:27] [ns_1@127.0.0.1:<0.24614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:27] [ns_1@127.0.0.1:<0.24664.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:28] [ns_1@127.0.0.1:<0.24609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:28] [ns_1@127.0.0.1:<0.24696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:29] [ns_1@127.0.0.1:<0.24628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:29] [ns_1@127.0.0.1:<0.24710.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:29] [ns_1@127.0.0.1:<0.24677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:30] [ns_1@127.0.0.1:<0.24622.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:30] [ns_1@127.0.0.1:<0.24710.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:30] [ns_1@127.0.0.1:<0.24706.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24688.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.24722.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:31] [ns_1@127.0.0.1:<0.24639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:31] [ns_1@127.0.0.1:<0.24710.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:31] [ns_1@127.0.0.1:<0.24689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:32] [ns_1@127.0.0.1:<0.24635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:32] [ns_1@127.0.0.1:<0.24710.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:32] [ns_1@127.0.0.1:<0.24723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:33] [ns_1@127.0.0.1:<0.24656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:36:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24710.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:36:33] [ns_1@127.0.0.1:<0.24702.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:34] [ns_1@127.0.0.1:<0.24648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:34] [ns_1@127.0.0.1:<0.24738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24722.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24753.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[stats:error] [2012-03-26 2:36:37] [ns_1@127.0.0.1:<0.24671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:37] [ns_1@127.0.0.1:<0.24713.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:38] [ns_1@127.0.0.1:<0.24716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:38] [ns_1@127.0.0.1:<0.24719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:39] [ns_1@127.0.0.1:<0.24683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:39] [ns_1@127.0.0.1:<0.24772.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:39] [ns_1@127.0.0.1:<0.24728.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:40] [ns_1@127.0.0.1:<0.24732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:40] [ns_1@127.0.0.1:<0.24772.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:40] [ns_1@127.0.0.1:<0.24736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24753.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24783.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:41] [ns_1@127.0.0.1:<0.24698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:41] [ns_1@127.0.0.1:<0.24772.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:41] [ns_1@127.0.0.1:<0.24743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:42] [ns_1@127.0.0.1:<0.24745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:42] [ns_1@127.0.0.1:<0.24772.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:42] [ns_1@127.0.0.1:<0.24747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:43] [ns_1@127.0.0.1:<0.24761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 
30000]}} [error_logger:error] [2012-03-26 2:36:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24772.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:36:43] [ns_1@127.0.0.1:<0.24763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:44] [ns_1@127.0.0.1:<0.24666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:44] [ns_1@127.0.0.1:<0.24757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:45] [ns_1@127.0.0.1:<0.24775.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24783.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24816.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:36:45] [ns_1@127.0.0.1:<0.24777.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:46] [ns_1@127.0.0.1:<0.24679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:46] [ns_1@127.0.0.1:<0.24768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:47] [ns_1@127.0.0.1:<0.24790.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:47] [ns_1@127.0.0.1:<0.24794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:48] [ns_1@127.0.0.1:<0.24691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:48] [ns_1@127.0.0.1:<0.24784.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:49] [ns_1@127.0.0.1:<0.24804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:49] [ns_1@127.0.0.1:<0.24838.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:49] [ns_1@127.0.0.1:<0.24806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:36:50] [ns_1@127.0.0.1:<0.24704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:50] [ns_1@127.0.0.1:<0.24838.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:36:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24816.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24850.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:36:51] [ns_1@127.0.0.1:<0.24838.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:52] [ns_1@127.0.0.1:<0.24838.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:36:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:36:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24838.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:36:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24850.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:36:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24865.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 
2:36:59] [ns_1@127.0.0.1:<0.24874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:36:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754616,948433}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38447208}, {processes,10243264}, {processes_used,8617784}, {system,28203944}, {atom,1306681}, {atom_used,1284164}, {binary,449992}, {code,12859877}, {ets,2458392}]}, {system_stats, [{cpu_utilization_rate,25.31645569620253}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5638}, {memory_data,{4040077312,4013649920,{<0.18771.0>,617544}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26196 kB\nBuffers: 63108 kB\nCached: 3527824 kB\nSwapCached: 0 kB\nActive: 311080 kB\nInactive: 3440524 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26196 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 64 kB\nWriteback: 0 kB\nAnonPages: 160668 kB\nMapped: 24872 kB\nSlab: 134416 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582796 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612491776}, {buffered_memory,64622592}, {free_memory,26824704}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5633683,0}}, {context_switches,{1627573,0}}, {garbage_collection,{863725,1214976694,0}}, {io,{{input,26931895},{output,62895806}}}, {reductions,{344328172,573315}}, {run_queue,0}, {runtime,{71610,180}}]}]}] [stats:error] [2012-03-26 2:37:00] [ns_1@127.0.0.1:<0.24765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:00] [ns_1@127.0.0.1:<0.24874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24865.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,menelaus_sup} started: [{pid,<0.24882.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:01] [ns_1@127.0.0.1:<0.24799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:01] [ns_1@127.0.0.1:<0.24817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:01] [ns_1@127.0.0.1:<0.24874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:02] [ns_1@127.0.0.1:<0.24819.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:02] [ns_1@127.0.0.1:<0.24780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:02] [ns_1@127.0.0.1:<0.24874.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:03] [ns_1@127.0.0.1:<0.24811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:03] [ns_1@127.0.0.1:<0.24830.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:37:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24874.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:37:04] [ns_1@127.0.0.1:<0.24832.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:04] [ns_1@127.0.0.1:<0.24797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:05] [ns_1@127.0.0.1:<0.24825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:05] [ns_1@127.0.0.1:<0.24843.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24882.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:05] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24917.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:06] [ns_1@127.0.0.1:<0.24845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:06] [ns_1@127.0.0.1:<0.24809.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:07] [ns_1@127.0.0.1:<0.24836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:07] [ns_1@127.0.0.1:<0.24892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:08] [ns_1@127.0.0.1:<0.24878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:09] [ns_1@127.0.0.1:<0.24938.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:10] [ns_1@127.0.0.1:<0.24938.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24917.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24943.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:11] [ns_1@127.0.0.1:<0.24885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:11] [ns_1@127.0.0.1:<0.24905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:11] [ns_1@127.0.0.1:<0.24938.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:12] [ns_1@127.0.0.1:<0.24894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:12] [ns_1@127.0.0.1:<0.24821.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:12] [ns_1@127.0.0.1:<0.24938.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:37:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24938.1> 
registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:37:15] [ns_1@127.0.0.1:<0.24900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24943.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24968.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:16] [ns_1@127.0.0.1:<0.24918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:16] [ns_1@127.0.0.1:<0.24907.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:17] [ns_1@127.0.0.1:<0.24898.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:17] [ns_1@127.0.0.1:<0.24911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:18] [ns_1@127.0.0.1:<0.24931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:18] [ns_1@127.0.0.1:<0.24920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:19] [ns_1@127.0.0.1:<0.24909.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:19] [ns_1@127.0.0.1:<0.24927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:19] [ns_1@127.0.0.1:<0.24992.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:20] [ns_1@127.0.0.1:<0.24954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:20] [ns_1@127.0.0.1:<0.24956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:20] [ns_1@127.0.0.1:<0.24992.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: 
child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24968.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25002.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:21] [ns_1@127.0.0.1:<0.24925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:21] [ns_1@127.0.0.1:<0.24946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:21] [ns_1@127.0.0.1:<0.24992.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:22] [ns_1@127.0.0.1:<0.24971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:22] [ns_1@127.0.0.1:<0.24834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:22] [ns_1@127.0.0.1:<0.24992.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:23] [ns_1@127.0.0.1:<0.24944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:23] [ns_1@127.0.0.1:<0.24969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:37:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24992.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:37:24] [ns_1@127.0.0.1:<0.24984.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:24] [ns_1@127.0.0.1:<0.24883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:25] [ns_1@127.0.0.1:<0.24963.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:25] [ns_1@127.0.0.1:<0.24982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:25] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25002.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25037.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:26] [ns_1@127.0.0.1:<0.24997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:26] [ns_1@127.0.0.1:<0.24975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:27] [ns_1@127.0.0.1:<0.24977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:27] [ns_1@127.0.0.1:<0.24995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:28] [ns_1@127.0.0.1:<0.25015.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:28] [ns_1@127.0.0.1:<0.24986.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:29] [ns_1@127.0.0.1:<0.24988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:29] [ns_1@127.0.0.1:<0.25013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:29] [ns_1@127.0.0.1:<0.25061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:30] [ns_1@127.0.0.1:<0.25028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:30] [ns_1@127.0.0.1:<0.25003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:30] [ns_1@127.0.0.1:<0.25061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25037.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25071.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:31] [ns_1@127.0.0.1:<0.25005.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:37:31] [ns_1@127.0.0.1:<0.25026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:31] [ns_1@127.0.0.1:<0.25061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:32] [ns_1@127.0.0.1:<0.25040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:32] [ns_1@127.0.0.1:<0.25061.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:32] [ns_1@127.0.0.1:<0.25018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:33] [ns_1@127.0.0.1:<0.25020.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:37:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25061.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:37:33] [ns_1@127.0.0.1:<0.25038.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:34] [ns_1@127.0.0.1:<0.25053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:34] [ns_1@127.0.0.1:<0.25030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:35] [ns_1@127.0.0.1:<0.25032.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:35] [ns_1@127.0.0.1:<0.25051.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25071.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25106.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:36] 
[ns_1@127.0.0.1:<0.25067.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:36] [ns_1@127.0.0.1:<0.25045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:37] [ns_1@127.0.0.1:<0.25047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:37] [ns_1@127.0.0.1:<0.25065.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:38] [ns_1@127.0.0.1:<0.25084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:38] [ns_1@127.0.0.1:<0.25055.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:39] [ns_1@127.0.0.1:<0.25057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:39] [ns_1@127.0.0.1:<0.25129.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:39] [ns_1@127.0.0.1:<0.25081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:40] [ns_1@127.0.0.1:<0.25096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:40] [ns_1@127.0.0.1:<0.25129.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25106.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25138.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:40] [ns_1@127.0.0.1:<0.25072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:41] [ns_1@127.0.0.1:<0.25074.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:41] [ns_1@127.0.0.1:<0.25129.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:41] [ns_1@127.0.0.1:<0.25094.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:42] [ns_1@127.0.0.1:<0.25109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:42] [ns_1@127.0.0.1:<0.25129.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:42] [ns_1@127.0.0.1:<0.25087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:43] [ns_1@127.0.0.1:<0.25092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:37:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25129.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:37:43] [ns_1@127.0.0.1:<0.25107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:44] [ns_1@127.0.0.1:<0.25123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:44] [ns_1@127.0.0.1:<0.25098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:45] [ns_1@127.0.0.1:<0.25103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25138.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25173.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:45] [ns_1@127.0.0.1:<0.25120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:46] [ns_1@127.0.0.1:<0.25139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:47] [ns_1@127.0.0.1:<0.25114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:47] [ns_1@127.0.0.1:<0.25118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:48] [ns_1@127.0.0.1:<0.25134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:48] [ns_1@127.0.0.1:<0.25154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:49] [ns_1@127.0.0.1:<0.25125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:37:49] [ns_1@127.0.0.1:<0.25132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:49] [ns_1@127.0.0.1:<0.25197.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:50] [ns_1@127.0.0.1:<0.25151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:50] [ns_1@127.0.0.1:<0.25166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:50] [ns_1@127.0.0.1:<0.25197.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25173.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25207.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:51] [ns_1@127.0.0.1:<0.25141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:51] [ns_1@127.0.0.1:<0.25147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:51] [ns_1@127.0.0.1:<0.25197.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:52] [ns_1@127.0.0.1:<0.25163.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:52] [ns_1@127.0.0.1:<0.25180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:52] [ns_1@127.0.0.1:<0.25197.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:53] [ns_1@127.0.0.1:<0.25156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:53] [ns_1@127.0.0.1:<0.25161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:37:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25197.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from 
ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:37:54] [ns_1@127.0.0.1:<0.25176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:54] [ns_1@127.0.0.1:<0.25191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:55] [ns_1@127.0.0.1:<0.25168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:55] [ns_1@127.0.0.1:<0.25174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:37:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25207.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:37:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25242.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:37:56] [ns_1@127.0.0.1:<0.25189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:56] [ns_1@127.0.0.1:<0.25208.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:57] [ns_1@127.0.0.1:<0.25182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:57] [ns_1@127.0.0.1:<0.25187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:58] [ns_1@127.0.0.1:<0.25202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:58] [ns_1@127.0.0.1:<0.25223.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:59] [ns_1@127.0.0.1:<0.25193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:37:59] [ns_1@127.0.0.1:<0.25200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:37:59] [ns_1@127.0.0.1:<0.25267.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:37:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754676,980407}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38427672}, {processes,10241672}, {processes_used,8616192}, {system,28186000}, {atom,1306681}, {atom_used,1284164}, {binary,452480}, 
{code,12859877}, {ets,2431312}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5698}, {memory_data,{4040077312,4013260800,{<0.18771.0>,621768}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25452 kB\nBuffers: 63168 kB\nCached: 3527992 kB\nSwapCached: 0 kB\nActive: 311208 kB\nInactive: 3440636 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25452 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 4 kB\nAnonPages: 160684 kB\nMapped: 24872 kB\nSlab: 134372 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 585284 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612663808}, {buffered_memory,64684032}, {free_memory,26062848}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5693716,1}}, {context_switches,{1639277,0}}, {garbage_collection,{869971,1224207400,0}}, {io,{{input,26962403},{output,63284964}}}, {reductions,{346544812,586440}}, {run_queue,0}, {runtime,{72280,180}}]}]}] [stats:error] [2012-03-26 2:38:00] [ns_1@127.0.0.1:<0.25220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:00] [ns_1@127.0.0.1:<0.25235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:00] [ns_1@127.0.0.1:<0.25250.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:00] [ns_1@127.0.0.1:<0.25260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:00] [ns_1@127.0.0.1:<0.25267.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25242.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25281.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:01] 
[ns_1@127.0.0.1:<0.25210.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:01] [ns_1@127.0.0.1:<0.25267.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:02] [ns_1@127.0.0.1:<0.25267.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 2:38:01] [ns_1@127.0.0.1:<0.25218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25267.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:38:05] [ns_1@127.0.0.1:<0.25282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:05] [ns_1@127.0.0.1:<0.25284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25281.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25304.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:06] [ns_1@127.0.0.1:<0.25233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:06] [ns_1@127.0.0.1:<0.25296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:07] [ns_1@127.0.0.1:<0.25225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:07] [ns_1@127.0.0.1:<0.25231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:08] [ns_1@127.0.0.1:<0.25245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:08] 
[ns_1@127.0.0.1:<0.25312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:09] [ns_1@127.0.0.1:<0.25237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:09] [ns_1@127.0.0.1:<0.25329.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:09] [ns_1@127.0.0.1:<0.25243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:10] [ns_1@127.0.0.1:<0.25258.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:10] [ns_1@127.0.0.1:<0.25329.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:10] [ns_1@127.0.0.1:<0.25323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25304.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25340.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:11] [ns_1@127.0.0.1:<0.25252.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:11] [ns_1@127.0.0.1:<0.25329.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:11] [ns_1@127.0.0.1:<0.25256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:12] [ns_1@127.0.0.1:<0.25273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:12] [ns_1@127.0.0.1:<0.25329.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 2:38:12] [ns_1@127.0.0.1:<0.25341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25329.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:38:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25340.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25363.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:16] [ns_1@127.0.0.1:<0.25275.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:17] [ns_1@127.0.0.1:<0.25262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:17] [ns_1@127.0.0.1:<0.25298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:18] [ns_1@127.0.0.1:<0.25271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:18] [ns_1@127.0.0.1:<0.25277.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:19] [ns_1@127.0.0.1:<0.25370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:19] [ns_1@127.0.0.1:<0.25316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:19] [ns_1@127.0.0.1:<0.25385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:20] [ns_1@127.0.0.1:<0.25305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:20] [ns_1@127.0.0.1:<0.25307.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:20] [ns_1@127.0.0.1:<0.25385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25363.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25395.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:38:21] [ns_1@127.0.0.1:<0.25381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:21] [ns_1@127.0.0.1:<0.25332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:21] [ns_1@127.0.0.1:<0.25385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:22] [ns_1@127.0.0.1:<0.25385.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [stats:error] [2012-03-26 2:38:22] [ns_1@127.0.0.1:<0.25318.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25385.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:38:25] [ns_1@127.0.0.1:<0.25347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25395.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25418.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:26] [ns_1@127.0.0.1:<0.25334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:26] [ns_1@127.0.0.1:<0.25320.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:27] [ns_1@127.0.0.1:<0.25398.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:27] [ns_1@127.0.0.1:<0.25375.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:28] [ns_1@127.0.0.1:<0.25351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:28] 
[ns_1@127.0.0.1:<0.25337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:29] [ns_1@127.0.0.1:<0.25413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:29] [ns_1@127.0.0.1:<0.25440.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:29] [ns_1@127.0.0.1:<0.25388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:30] [ns_1@127.0.0.1:<0.25364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:30] [ns_1@127.0.0.1:<0.25440.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:30] [ns_1@127.0.0.1:<0.25354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25418.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25452.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:31] [ns_1@127.0.0.1:<0.25430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:31] [ns_1@127.0.0.1:<0.25440.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:31] [ns_1@127.0.0.1:<0.25406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:32] [ns_1@127.0.0.1:<0.25377.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:32] [ns_1@127.0.0.1:<0.25440.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:32] [ns_1@127.0.0.1:<0.25368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:33] [ns_1@127.0.0.1:<0.25444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:38:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25440.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:38:33] [ns_1@127.0.0.1:<0.25419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:34] [ns_1@127.0.0.1:<0.25390.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:34] [ns_1@127.0.0.1:<0.25379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:35] [ns_1@127.0.0.1:<0.25458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25452.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25485.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:35] [ns_1@127.0.0.1:<0.25432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:36] [ns_1@127.0.0.1:<0.25421.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:36] [ns_1@127.0.0.1:<0.25396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:37] [ns_1@127.0.0.1:<0.25473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:37] [ns_1@127.0.0.1:<0.25446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:38] [ns_1@127.0.0.1:<0.25434.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:38] [ns_1@127.0.0.1:<0.25426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:39] [ns_1@127.0.0.1:<0.25486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:39] [ns_1@127.0.0.1:<0.25510.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:39] [ns_1@127.0.0.1:<0.25462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:40] [ns_1@127.0.0.1:<0.25449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:40] [ns_1@127.0.0.1:<0.25510.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25485.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25519.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:41] [ns_1@127.0.0.1:<0.25436.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:41] [ns_1@127.0.0.1:<0.25499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:41] [ns_1@127.0.0.1:<0.25510.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:42] [ns_1@127.0.0.1:<0.25475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:42] [ns_1@127.0.0.1:<0.25466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:42] [ns_1@127.0.0.1:<0.25510.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:43] [ns_1@127.0.0.1:<0.25453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:43] [ns_1@127.0.0.1:<0.25513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:38:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25510.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:38:44] [ns_1@127.0.0.1:<0.25488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:44] [ns_1@127.0.0.1:<0.25477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:45] [ns_1@127.0.0.1:<0.25468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:45] [ns_1@127.0.0.1:<0.25530.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25519.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25554.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:46] [ns_1@127.0.0.1:<0.25501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:46] [ns_1@127.0.0.1:<0.25490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:47] [ns_1@127.0.0.1:<0.25479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:47] [ns_1@127.0.0.1:<0.25542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:48] [ns_1@127.0.0.1:<0.25515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:48] [ns_1@127.0.0.1:<0.25504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:49] [ns_1@127.0.0.1:<0.25495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:49] [ns_1@127.0.0.1:<0.25555.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:49] [ns_1@127.0.0.1:<0.25578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:50] [ns_1@127.0.0.1:<0.25532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:50] [ns_1@127.0.0.1:<0.25520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:50] [ns_1@127.0.0.1:<0.25578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25554.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25588.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:51] [ns_1@127.0.0.1:<0.25506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:51] [ns_1@127.0.0.1:<0.25568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:51] [ns_1@127.0.0.1:<0.25578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:52] [ns_1@127.0.0.1:<0.25544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:52] [ns_1@127.0.0.1:<0.25535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:52] [ns_1@127.0.0.1:<0.25578.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:53] [ns_1@127.0.0.1:<0.25522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:53] [ns_1@127.0.0.1:<0.25581.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:38:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25578.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:38:54] [ns_1@127.0.0.1:<0.25557.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:54] [ns_1@127.0.0.1:<0.25547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:55] [ns_1@127.0.0.1:<0.25537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:55] [ns_1@127.0.0.1:<0.25599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:38:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25588.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:38:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.25623.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:38:56] [ns_1@127.0.0.1:<0.25570.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:56] [ns_1@127.0.0.1:<0.25561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:57] [ns_1@127.0.0.1:<0.25549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:57] [ns_1@127.0.0.1:<0.25612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:58] [ns_1@127.0.0.1:<0.25583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:58] [ns_1@127.0.0.1:<0.25572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:38:59] [ns_1@127.0.0.1:<0.25563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:38:59] [ns_1@127.0.0.1:<0.25659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:38:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754737,12380}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38536928}, {processes,10314600}, {processes_used,8689120}, {system,28222328}, {atom,1306681}, {atom_used,1284164}, {binary,453584}, {code,12859877}, {ets,2460048}]}, {system_stats, [{cpu_utilization_rate,25.628140703517587}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5758}, {memory_data,{4040077312,4014014464,{<0.18771.0>,625992}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25436 kB\nBuffers: 63232 kB\nCached: 3528104 kB\nSwapCached: 0 kB\nActive: 311328 kB\nInactive: 3440720 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25436 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 8 kB\nWriteback: 0 kB\nAnonPages: 160688 kB\nMapped: 24872 kB\nSlab: 134368 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 585284 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612778496}, {buffered_memory,64749568}, {free_memory,26046464}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, 
{index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5753749,0}}, {context_switches,{1651466,0}}, {garbage_collection,{876552,1234144887,0}}, {io,{{input,26992923},{output,63693417}}}, {reductions,{348890231,612915}}, {run_queue,0}, {runtime,{72980,200}}]}]}] [stats:error] [2012-03-26 2:38:59] [ns_1@127.0.0.1:<0.25624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:00] [ns_1@127.0.0.1:<0.25601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:00] [ns_1@127.0.0.1:<0.25659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:00] [ns_1@127.0.0.1:<0.25589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:00] [ns_1@127.0.0.1:<0.25604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:00] [ns_1@127.0.0.1:<0.25616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25623.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25675.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:01] [ns_1@127.0.0.1:<0.25574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:01] [ns_1@127.0.0.1:<0.25659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:01] [ns_1@127.0.0.1:<0.25637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:02] [ns_1@127.0.0.1:<0.25614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:02] [ns_1@127.0.0.1:<0.25659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:02] [ns_1@127.0.0.1:<0.25631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:03] [ns_1@127.0.0.1:<0.25591.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:39:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25659.1> registered_name: [] exception exit: {noproc, 
{gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:39:03] [ns_1@127.0.0.1:<0.25665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:04] [ns_1@127.0.0.1:<0.25626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:04] [ns_1@127.0.0.1:<0.25641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:05] [ns_1@127.0.0.1:<0.25606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25675.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25708.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:05] [ns_1@127.0.0.1:<0.25685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:06] [ns_1@127.0.0.1:<0.25639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:06] [ns_1@127.0.0.1:<0.25676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:07] [ns_1@127.0.0.1:<0.25618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:07] [ns_1@127.0.0.1:<0.25698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:08] [ns_1@127.0.0.1:<0.25668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:08] [ns_1@127.0.0.1:<0.25691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:09] [ns_1@127.0.0.1:<0.25635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:09] [ns_1@127.0.0.1:<0.25735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:09] [ns_1@127.0.0.1:<0.25711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:10] [ns_1@127.0.0.1:<0.25670.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:10] [ns_1@127.0.0.1:<0.25735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25708.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25744.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:11] [ns_1@127.0.0.1:<0.25702.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:11] [ns_1@127.0.0.1:<0.25662.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:11] [ns_1@127.0.0.1:<0.25735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:12] [ns_1@127.0.0.1:<0.25724.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:12] [ns_1@127.0.0.1:<0.25672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:12] [ns_1@127.0.0.1:<0.25735.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:13] [ns_1@127.0.0.1:<0.25718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:13] [ns_1@127.0.0.1:<0.25681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:39:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25735.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:39:14] [ns_1@127.0.0.1:<0.25740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:14] [ns_1@127.0.0.1:<0.25689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:15] [ns_1@127.0.0.1:<0.25729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:15] [ns_1@127.0.0.1:<0.25696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25744.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25779.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:16] [ns_1@127.0.0.1:<0.25757.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:16] [ns_1@127.0.0.1:<0.25700.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:17] [ns_1@127.0.0.1:<0.25747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:17] [ns_1@127.0.0.1:<0.25709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:18] [ns_1@127.0.0.1:<0.25769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:18] [ns_1@127.0.0.1:<0.25713.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:19] [ns_1@127.0.0.1:<0.25762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:19] [ns_1@127.0.0.1:<0.25722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:19] [ns_1@127.0.0.1:<0.25803.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:20] [ns_1@127.0.0.1:<0.25803.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:20] [ns_1@127.0.0.1:<0.25782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25779.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25811.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:39:21] [ns_1@127.0.0.1:<0.25803.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:22] [ns_1@127.0.0.1:<0.25755.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:22] [ns_1@127.0.0.1:<0.25795.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:22] [ns_1@127.0.0.1:<0.25803.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:23] [ns_1@127.0.0.1:<0.25760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:23] [ns_1@127.0.0.1:<0.25774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:39:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25803.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:39:24] [ns_1@127.0.0.1:<0.25767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:24] [ns_1@127.0.0.1:<0.25727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:25] [ns_1@127.0.0.1:<0.25772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:25] [ns_1@127.0.0.1:<0.25788.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25811.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25842.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:26] [ns_1@127.0.0.1:<0.25780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:26] [ns_1@127.0.0.1:<0.25745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:27] [ns_1@127.0.0.1:<0.25797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:27] [ns_1@127.0.0.1:<0.25799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:28] [ns_1@127.0.0.1:<0.25806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:28] [ns_1@127.0.0.1:<0.25823.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:29] [ns_1@127.0.0.1:<0.25825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:29] [ns_1@127.0.0.1:<0.25738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:29] [ns_1@127.0.0.1:<0.25866.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:30] [ns_1@127.0.0.1:<0.25820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:30] [ns_1@127.0.0.1:<0.25835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:30] [ns_1@127.0.0.1:<0.25866.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25842.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25876.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:31] [ns_1@127.0.0.1:<0.25837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:31] [ns_1@127.0.0.1:<0.25818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:31] [ns_1@127.0.0.1:<0.25866.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:32] [ns_1@127.0.0.1:<0.25833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:32] [ns_1@127.0.0.1:<0.25786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:32] [ns_1@127.0.0.1:<0.25866.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:33] [ns_1@127.0.0.1:<0.25852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:33] 
[ns_1@127.0.0.1:<0.25831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:39:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25866.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:39:34] [ns_1@127.0.0.1:<0.25845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:34] [ns_1@127.0.0.1:<0.25850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:35] [ns_1@127.0.0.1:<0.25862.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:35] [ns_1@127.0.0.1:<0.25843.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25876.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25911.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:36] [ns_1@127.0.0.1:<0.25858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:36] [ns_1@127.0.0.1:<0.25860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:37] [ns_1@127.0.0.1:<0.25879.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:37] [ns_1@127.0.0.1:<0.25793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:38] [ns_1@127.0.0.1:<0.25872.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:38] [ns_1@127.0.0.1:<0.25892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:39] 
[ns_1@127.0.0.1:<0.25894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:39] [ns_1@127.0.0.1:<0.25934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:40] [ns_1@127.0.0.1:<0.25934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:39] [ns_1@127.0.0.1:<0.25856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25911.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25941.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:41] [ns_1@127.0.0.1:<0.25905.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:41] [ns_1@127.0.0.1:<0.25934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:42] [ns_1@127.0.0.1:<0.25925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:42] [ns_1@127.0.0.1:<0.25934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:42] [ns_1@127.0.0.1:<0.25927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:43] [ns_1@127.0.0.1:<0.25930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:39:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25934.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:39:43] [ns_1@127.0.0.1:<0.25937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:44] 
[ns_1@127.0.0.1:<0.25888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:44] [ns_1@127.0.0.1:<0.25877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:45] [ns_1@127.0.0.1:<0.25942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:45] [ns_1@127.0.0.1:<0.25870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25941.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.25974.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:46] [ns_1@127.0.0.1:<0.25901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:46] [ns_1@127.0.0.1:<0.25903.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:47] [ns_1@127.0.0.1:<0.25923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:47] [ns_1@127.0.0.1:<0.25886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:48] [ns_1@127.0.0.1:<0.25914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:48] [ns_1@127.0.0.1:<0.25919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:49] [ns_1@127.0.0.1:<0.25960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:49] [ns_1@127.0.0.1:<0.25994.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:49] [ns_1@127.0.0.1:<0.25899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:50] [ns_1@127.0.0.1:<0.25953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:50] [ns_1@127.0.0.1:<0.25994.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.25974.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:50] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26006.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:50] [ns_1@127.0.0.1:<0.25955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:51] [ns_1@127.0.0.1:<0.25971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:51] [ns_1@127.0.0.1:<0.25994.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:51] [ns_1@127.0.0.1:<0.25912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:52] [ns_1@127.0.0.1:<0.25965.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:52] [ns_1@127.0.0.1:<0.25994.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:52] [ns_1@127.0.0.1:<0.25967.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:53] [ns_1@127.0.0.1:<0.25986.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:39:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.25994.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:39:53] [ns_1@127.0.0.1:<0.25950.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:54] [ns_1@127.0.0.1:<0.25977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:54] [ns_1@127.0.0.1:<0.25981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:55] [ns_1@127.0.0.1:<0.25999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:39:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26006.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:39:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26041.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:39:56] [ns_1@127.0.0.1:<0.25962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:56] [ns_1@127.0.0.1:<0.25990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:57] [ns_1@127.0.0.1:<0.25992.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:57] [ns_1@127.0.0.1:<0.26014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:58] [ns_1@127.0.0.1:<0.25975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:58] [ns_1@127.0.0.1:<0.26007.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:59] [ns_1@127.0.0.1:<0.26009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:39:59] [ns_1@127.0.0.1:<0.26030.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:39:59] [ns_1@127.0.0.1:<0.26066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:39:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754797,45433}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38272120}, {processes,10066544}, {processes_used,8441064}, {system,28205576}, {atom,1306681}, {atom_used,1284164}, {binary,458736}, {code,12859877}, {ets,2433096}]}, {system_stats, [{cpu_utilization_rate,25.628140703517587}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5818}, {memory_data,{4040077312,4014014464,{<0.18771.0>,630216}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25320 kB\nBuffers: 63336 kB\nCached: 3527896 kB\nSwapCached: 0 kB\nActive: 311388 kB\nInactive: 3440564 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25320 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160720 kB\nMapped: 24872 kB\nSlab: 134364 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 585284 kB\nVmallocTotal: 
34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612565504}, {buffered_memory,64856064}, {free_memory,25927680}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5813782,1}}, {context_switches,{1664865,0}}, {garbage_collection,{883862,1245217313,0}}, {io,{{input,27269280},{output,64687276}}}, {reductions,{351804628,581866}}, {run_queue,0}, {runtime,{73790,200}}]}]}] [stats:error] [2012-03-26 2:40:00] [ns_1@127.0.0.1:<0.25988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:00] [ns_1@127.0.0.1:<0.26022.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:00] [ns_1@127.0.0.1:<0.26066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26041.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26076.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:01] [ns_1@127.0.0.1:<0.26024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:01] [ns_1@127.0.0.1:<0.26036.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:01] [ns_1@127.0.0.1:<0.26051.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:01] [ns_1@127.0.0.1:<0.26042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:01] [ns_1@127.0.0.1:<0.26066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:02] [ns_1@127.0.0.1:<0.26001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:02] [ns_1@127.0.0.1:<0.26034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:02] [ns_1@127.0.0.1:<0.26066.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:03] [ns_1@127.0.0.1:<0.26061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:03] [ns_1@127.0.0.1:<0.26055.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run 
exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:40:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26066.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:40:04] [ns_1@127.0.0.1:<0.26019.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:04] [ns_1@127.0.0.1:<0.26047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:05] [ns_1@127.0.0.1:<0.26083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:05] [ns_1@127.0.0.1:<0.26070.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26076.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26115.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:06] [ns_1@127.0.0.1:<0.26032.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:06] [ns_1@127.0.0.1:<0.26059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:07] [ns_1@127.0.0.1:<0.26098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:07] [ns_1@127.0.0.1:<0.26090.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:08] [ns_1@127.0.0.1:<0.26044.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:08] [ns_1@127.0.0.1:<0.26077.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:09] [ns_1@127.0.0.1:<0.26109.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:09] [ns_1@127.0.0.1:<0.26103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:09] [ns_1@127.0.0.1:<0.26142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:10] [ns_1@127.0.0.1:<0.26057.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:10] [ns_1@127.0.0.1:<0.26079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:10] [ns_1@127.0.0.1:<0.26142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26115.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26151.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:11] [ns_1@127.0.0.1:<0.26125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:11] [ns_1@127.0.0.1:<0.26116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:11] [ns_1@127.0.0.1:<0.26142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:12] [ns_1@127.0.0.1:<0.26072.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:12] [ns_1@127.0.0.1:<0.26142.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:12] [ns_1@127.0.0.1:<0.26081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:13] [ns_1@127.0.0.1:<0.26136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:40:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26142.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: 
[stats:error] [2012-03-26 2:40:13] [ns_1@127.0.0.1:<0.26129.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:14] [ns_1@127.0.0.1:<0.26092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:14] [ns_1@127.0.0.1:<0.26096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:15] [ns_1@127.0.0.1:<0.26156.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:15] [ns_1@127.0.0.1:<0.26145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26151.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26186.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:16] [ns_1@127.0.0.1:<0.26105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:16] [ns_1@127.0.0.1:<0.26107.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:17] [ns_1@127.0.0.1:<0.26172.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:17] [ns_1@127.0.0.1:<0.26162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:18] [ns_1@127.0.0.1:<0.26118.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:18] [ns_1@127.0.0.1:<0.26123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:19] [ns_1@127.0.0.1:<0.26183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:19] [ns_1@127.0.0.1:<0.26208.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:19] [ns_1@127.0.0.1:<0.26174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:20] [ns_1@127.0.0.1:<0.26131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:20] [ns_1@127.0.0.1:<0.26208.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:20] [ns_1@127.0.0.1:<0.26134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26186.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26220.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:21] [ns_1@127.0.0.1:<0.26198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:21] [ns_1@127.0.0.1:<0.26208.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:21] [ns_1@127.0.0.1:<0.26187.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:22] [ns_1@127.0.0.1:<0.26147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:22] [ns_1@127.0.0.1:<0.26208.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:22] [ns_1@127.0.0.1:<0.26152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:23] [ns_1@127.0.0.1:<0.26211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:40:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26208.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:40:24] [ns_1@127.0.0.1:<0.26200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:24] [ns_1@127.0.0.1:<0.26165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:25] [ns_1@127.0.0.1:<0.26167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:25] [ns_1@127.0.0.1:<0.26226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26220.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26253.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:26] [ns_1@127.0.0.1:<0.26213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:26] [ns_1@127.0.0.1:<0.26177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:27] [ns_1@127.0.0.1:<0.26179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:27] [ns_1@127.0.0.1:<0.26242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:28] [ns_1@127.0.0.1:<0.26231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:28] [ns_1@127.0.0.1:<0.26189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:29] [ns_1@127.0.0.1:<0.26193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:29] [ns_1@127.0.0.1:<0.26254.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:29] [ns_1@127.0.0.1:<0.26277.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:30] [ns_1@127.0.0.1:<0.26244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:30] [ns_1@127.0.0.1:<0.26202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:30] [ns_1@127.0.0.1:<0.26277.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26253.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26287.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:31] [ns_1@127.0.0.1:<0.26204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:31] [ns_1@127.0.0.1:<0.26267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:31] 
[ns_1@127.0.0.1:<0.26277.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:32] [ns_1@127.0.0.1:<0.26256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:32] [ns_1@127.0.0.1:<0.26217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:32] [ns_1@127.0.0.1:<0.26277.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:33] [ns_1@127.0.0.1:<0.26221.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:33] [ns_1@127.0.0.1:<0.26281.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:40:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26277.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:40:34] [ns_1@127.0.0.1:<0.26269.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:34] [ns_1@127.0.0.1:<0.26234.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:35] [ns_1@127.0.0.1:<0.26236.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:35] [ns_1@127.0.0.1:<0.26297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26287.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:36] [ns_1@127.0.0.1:<0.26283.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:36] 
[ns_1@127.0.0.1:<0.26246.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:37] [ns_1@127.0.0.1:<0.26248.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:37] [ns_1@127.0.0.1:<0.26310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:38] [ns_1@127.0.0.1:<0.26299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:38] [ns_1@127.0.0.1:<0.26261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:39] [ns_1@127.0.0.1:<0.26263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:39] [ns_1@127.0.0.1:<0.26345.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:39] [ns_1@127.0.0.1:<0.26323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:40] [ns_1@127.0.0.1:<0.26312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:40] [ns_1@127.0.0.1:<0.26345.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:40] [ns_1@127.0.0.1:<0.26271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:40:41] [ns_1@127.0.0.1:<0.26345.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:42] [ns_1@127.0.0.1:<0.26345.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 2:40:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26356.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:40:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:40:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26345.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: 
[<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:40:44] [ns_1@127.0.0.1:<0.26336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:44] [ns_1@127.0.0.1:<0.26325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:45] [ns_1@127.0.0.1:<0.26288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:45] [ns_1@127.0.0.1:<0.26290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26356.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26379.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:46] [ns_1@127.0.0.1:<0.26350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:46] [ns_1@127.0.0.1:<0.26338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:47] [ns_1@127.0.0.1:<0.26303.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:47] [ns_1@127.0.0.1:<0.26273.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:48] [ns_1@127.0.0.1:<0.26369.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:48] [ns_1@127.0.0.1:<0.26353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:49] [ns_1@127.0.0.1:<0.26314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:49] [ns_1@127.0.0.1:<0.26305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:49] [ns_1@127.0.0.1:<0.26403.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:50] [ns_1@127.0.0.1:<0.26382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:50] [ns_1@127.0.0.1:<0.26372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:50] [ns_1@127.0.0.1:<0.26403.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26379.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26413.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:51] [ns_1@127.0.0.1:<0.26330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:51] [ns_1@127.0.0.1:<0.26316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:51] [ns_1@127.0.0.1:<0.26403.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:52] [ns_1@127.0.0.1:<0.26395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:52] [ns_1@127.0.0.1:<0.26386.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:52] [ns_1@127.0.0.1:<0.26403.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:53] [ns_1@127.0.0.1:<0.26341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:53] [ns_1@127.0.0.1:<0.26334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:40:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26403.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:40:54] [ns_1@127.0.0.1:<0.26408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:54] [ns_1@127.0.0.1:<0.26397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:55] [ns_1@127.0.0.1:<0.26374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:55] [ns_1@127.0.0.1:<0.26348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:40:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26413.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:40:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26448.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:40:56] [ns_1@127.0.0.1:<0.26426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:56] [ns_1@127.0.0.1:<0.26414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:57] [ns_1@127.0.0.1:<0.26388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:57] [ns_1@127.0.0.1:<0.26367.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:58] [ns_1@127.0.0.1:<0.26439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:58] [ns_1@127.0.0.1:<0.26429.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:40:59] [ns_1@127.0.0.1:<0.26399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:40:59] [ns_1@127.0.0.1:<0.26471.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:40:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754857,78341}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38451320}, {processes,10220768}, {processes_used,8595288}, {system,28230552}, {atom,1306681}, {atom_used,1284164}, {binary,448608}, {code,12859877}, {ets,2461880}]}, {system_stats, [{cpu_utilization_rate,26.40449438202247}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5878}, {memory_data,{4040077312,4014141440,{<0.18771.0>,634440}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25444 kB\nBuffers: 63388 kB\nCached: 3528056 kB\nSwapCached: 0 kB\nActive: 311512 kB\nInactive: 3440656 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25444 kB\nSwapTotal: 
5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160724 kB\nMapped: 24872 kB\nSlab: 134372 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 583208 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612729344}, {buffered_memory,64909312}, {free_memory,26054656}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5873814,2}}, {context_switches,{1677578,0}}, {garbage_collection,{890353,1255941985,0}}, {io,{{input,27299851},{output,65115649}}}, {reductions,{354246421,543269}}, {run_queue,0}, {runtime,{74640,190}}]}]}] [stats:error] [2012-03-26 2:40:59] [ns_1@127.0.0.1:<0.26380.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:00] [ns_1@127.0.0.1:<0.26451.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:00] [ns_1@127.0.0.1:<0.26471.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:00] [ns_1@127.0.0.1:<0.26441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26448.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26483.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:01] [ns_1@127.0.0.1:<0.26416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:01] [ns_1@127.0.0.1:<0.26431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:01] [ns_1@127.0.0.1:<0.26443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:01] [ns_1@127.0.0.1:<0.26458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:01] [ns_1@127.0.0.1:<0.26474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:01] [ns_1@127.0.0.1:<0.26471.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:01] [ns_1@127.0.0.1:<0.26393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:02] [ns_1@127.0.0.1:<0.26464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:41:02] [ns_1@127.0.0.1:<0.26471.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:02] [ns_1@127.0.0.1:<0.26456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:03] [ns_1@127.0.0.1:<0.26497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:41:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26471.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:41:03] [ns_1@127.0.0.1:<0.26406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:04] [ns_1@127.0.0.1:<0.26480.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:04] [ns_1@127.0.0.1:<0.26466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:05] [ns_1@127.0.0.1:<0.26512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:05] [ns_1@127.0.0.1:<0.26424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26483.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26526.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:06] [ns_1@127.0.0.1:<0.26505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:06] [ns_1@127.0.0.1:<0.26484.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:07] [ns_1@127.0.0.1:<0.26523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:41:07] [ns_1@127.0.0.1:<0.26437.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:08] [ns_1@127.0.0.1:<0.26516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:08] [ns_1@127.0.0.1:<0.26486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:09] [ns_1@127.0.0.1:<0.26538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:09] [ns_1@127.0.0.1:<0.26551.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:09] [ns_1@127.0.0.1:<0.26449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:10] [ns_1@127.0.0.1:<0.26529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:10] [ns_1@127.0.0.1:<0.26551.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26526.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26560.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:11] [ns_1@127.0.0.1:<0.26488.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:11] [ns_1@127.0.0.1:<0.26554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:11] [ns_1@127.0.0.1:<0.26551.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:12] [ns_1@127.0.0.1:<0.26462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:12] [ns_1@127.0.0.1:<0.26543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:12] [ns_1@127.0.0.1:<0.26551.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:13] [ns_1@127.0.0.1:<0.26490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:13] [ns_1@127.0.0.1:<0.26569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:41:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26551.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:41:14] [ns_1@127.0.0.1:<0.26477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:14] [ns_1@127.0.0.1:<0.26561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:15] [ns_1@127.0.0.1:<0.26492.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:15] [ns_1@127.0.0.1:<0.26583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26560.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26595.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:16] [ns_1@127.0.0.1:<0.26501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:16] [ns_1@127.0.0.1:<0.26576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:17] [ns_1@127.0.0.1:<0.26507.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:17] [ns_1@127.0.0.1:<0.26596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:18] [ns_1@127.0.0.1:<0.26514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:18] [ns_1@127.0.0.1:<0.26588.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:19] [ns_1@127.0.0.1:<0.26518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:19] [ns_1@127.0.0.1:<0.26609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:19] [ns_1@127.0.0.1:<0.26619.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:20] [ns_1@127.0.0.1:<0.26527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:20] [ns_1@127.0.0.1:<0.26602.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:20] [ns_1@127.0.0.1:<0.26619.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26595.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26629.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:21] [ns_1@127.0.0.1:<0.26534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:21] [ns_1@127.0.0.1:<0.26622.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:21] [ns_1@127.0.0.1:<0.26619.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:22] [ns_1@127.0.0.1:<0.26540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:22] [ns_1@127.0.0.1:<0.26613.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:22] [ns_1@127.0.0.1:<0.26619.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:23] [ns_1@127.0.0.1:<0.26545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:23] [ns_1@127.0.0.1:<0.26640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:41:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26619.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:41:24] [ns_1@127.0.0.1:<0.26556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:41:24] [ns_1@127.0.0.1:<0.26630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:25] [ns_1@127.0.0.1:<0.26563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:25] [ns_1@127.0.0.1:<0.26653.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26629.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26664.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:26] [ns_1@127.0.0.1:<0.26573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:26] [ns_1@127.0.0.1:<0.26645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:27] [ns_1@127.0.0.1:<0.26578.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:27] [ns_1@127.0.0.1:<0.26665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:28] [ns_1@127.0.0.1:<0.26585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:28] [ns_1@127.0.0.1:<0.26657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:29] [ns_1@127.0.0.1:<0.26590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:29] [ns_1@127.0.0.1:<0.26686.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:29] [ns_1@127.0.0.1:<0.26678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:30] [ns_1@127.0.0.1:<0.26598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:30] [ns_1@127.0.0.1:<0.26686.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:30] [ns_1@127.0.0.1:<0.26672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26664.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:30] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26698.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:31] [ns_1@127.0.0.1:<0.26604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:31] [ns_1@127.0.0.1:<0.26686.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:31] [ns_1@127.0.0.1:<0.26692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:32] [ns_1@127.0.0.1:<0.26611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:32] [ns_1@127.0.0.1:<0.26686.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:32] [ns_1@127.0.0.1:<0.26682.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:33] [ns_1@127.0.0.1:<0.26615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:41:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26686.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:41:33] [ns_1@127.0.0.1:<0.26708.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:34] [ns_1@127.0.0.1:<0.26624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:34] [ns_1@127.0.0.1:<0.26699.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:35] [ns_1@127.0.0.1:<0.26632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:35] [ns_1@127.0.0.1:<0.26721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26698.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26733.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:36] [ns_1@127.0.0.1:<0.26642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:36] [ns_1@127.0.0.1:<0.26714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:37] [ns_1@127.0.0.1:<0.26647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:37] [ns_1@127.0.0.1:<0.26734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:38] [ns_1@127.0.0.1:<0.26655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:38] [ns_1@127.0.0.1:<0.26725.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:39] [ns_1@127.0.0.1:<0.26659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:39] [ns_1@127.0.0.1:<0.26756.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:39] [ns_1@127.0.0.1:<0.26747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:40] [ns_1@127.0.0.1:<0.26667.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:40] [ns_1@127.0.0.1:<0.26756.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26733.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26765.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:41] [ns_1@127.0.0.1:<0.26741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:41] [ns_1@127.0.0.1:<0.26756.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:42] [ns_1@127.0.0.1:<0.26761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:42] [ns_1@127.0.0.1:<0.26680.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:42] 
[ns_1@127.0.0.1:<0.26756.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:43] [ns_1@127.0.0.1:<0.26752.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:43] [ns_1@127.0.0.1:<0.26676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:41:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26756.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:41:44] [ns_1@127.0.0.1:<0.26776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:44] [ns_1@127.0.0.1:<0.26695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:45] [ns_1@127.0.0.1:<0.26781.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:45] [ns_1@127.0.0.1:<0.26689.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26765.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26798.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:46] [ns_1@127.0.0.1:<0.26788.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:46] [ns_1@127.0.0.1:<0.26712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:47] [ns_1@127.0.0.1:<0.26793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:47] [ns_1@127.0.0.1:<0.26704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:48] 
[ns_1@127.0.0.1:<0.26801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:48] [ns_1@127.0.0.1:<0.26723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:49] [ns_1@127.0.0.1:<0.26807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:49] [ns_1@127.0.0.1:<0.26719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:49] [ns_1@127.0.0.1:<0.26822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:50] [ns_1@127.0.0.1:<0.26814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:50] [ns_1@127.0.0.1:<0.26736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:50] [ns_1@127.0.0.1:<0.26822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26798.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26832.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:51] [ns_1@127.0.0.1:<0.26818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:51] [ns_1@127.0.0.1:<0.26730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:51] [ns_1@127.0.0.1:<0.26822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:52] [ns_1@127.0.0.1:<0.26827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:52] [ns_1@127.0.0.1:<0.26750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:52] [ns_1@127.0.0.1:<0.26822.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:53] [ns_1@127.0.0.1:<0.26835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:53] [ns_1@127.0.0.1:<0.26745.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:41:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26822.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:41:54] [ns_1@127.0.0.1:<0.26845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:54] [ns_1@127.0.0.1:<0.26766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:55] [ns_1@127.0.0.1:<0.26850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:55] [ns_1@127.0.0.1:<0.26759.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:41:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26832.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:41:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26867.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:41:56] [ns_1@127.0.0.1:<0.26858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:56] [ns_1@127.0.0.1:<0.26779.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:57] [ns_1@127.0.0.1:<0.26862.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:57] [ns_1@127.0.0.1:<0.26772.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:58] [ns_1@127.0.0.1:<0.26870.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:58] [ns_1@127.0.0.1:<0.26791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:41:59] [ns_1@127.0.0.1:<0.26877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:41:59] [ns_1@127.0.0.1:<0.26905.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:41:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754917,112604}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, 
{incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38344832}, {processes,10134352}, {processes_used,8508872}, {system,28210480}, {atom,1306681}, {atom_used,1284164}, {binary,451528}, {code,12859877}, {ets,2431392}]}, {system_stats, [{cpu_utilization_rate,25.6857855361596}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5938}, {memory_data,{4040077312,4014022656,{<0.18771.0>,638664}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25072 kB\nBuffers: 63444 kB\nCached: 3528212 kB\nSwapCached: 0 kB\nActive: 311632 kB\nInactive: 3440756 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25072 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160732 kB\nMapped: 24872 kB\nSlab: 134368 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 583208 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612889088}, {buffered_memory,64966656}, {free_memory,25673728}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5933849,0}}, {context_switches,{1690554,0}}, {garbage_collection,{897000,1267097407,0}}, {io,{{input,27330440},{output,65559431}}}, {reductions,{356773703,579498}}, {run_queue,0}, {runtime,{75440,180}}]}]}] [stats:error] [2012-03-26 2:41:59] [ns_1@127.0.0.1:<0.26786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:00] [ns_1@127.0.0.1:<0.26883.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:00] [ns_1@127.0.0.1:<0.26905.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:00] [ns_1@127.0.0.1:<0.26803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26867.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.26917.1>}, 
{name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:01] [ns_1@127.0.0.1:<0.26908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:01] [ns_1@127.0.0.1:<0.26905.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:01] [ns_1@127.0.0.1:<0.26799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:01] [ns_1@127.0.0.1:<0.26812.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:01] [ns_1@127.0.0.1:<0.26856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:01] [ns_1@127.0.0.1:<0.26868.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:01] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 2:42:01] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:42:01] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:42:01] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:42:01] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:42:01] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:warn] [2012-03-26 2:42:06] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
[ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:<0.26881.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:<0.26860.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:<0.26962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:42:06] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 2:42:06] [ns_1@127.0.0.1:<0.26875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:07] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:42:08: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] 
[2012-03-26 2:42:07] [ns_1@127.0.0.1:<0.26923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:07] [ns_1@127.0.0.1:<0.26962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:07] [ns_1@127.0.0.1:<0.26911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:08] [ns_1@127.0.0.1:<0.26914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:08] [ns_1@127.0.0.1:<0.26962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:08] [ns_1@127.0.0.1:<0.26885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:09] [ns_1@127.0.0.1:<0.26925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.26962.1>} [ns_server:info] [2012-03-26 2:42:09] [ns_1@127.0.0.1:<0.26962.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:09] [ns_1@127.0.0.1:<0.26982.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:10] [ns_1@127.0.0.1:<0.26955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:10] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:42:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.26962.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:42:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.26917.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27001.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[stats:error] [2012-03-26 2:42:11] [ns_1@127.0.0.1:<0.26918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:11] [ns_1@127.0.0.1:<0.26927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:12] [ns_1@127.0.0.1:<0.26997.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:12] [ns_1@127.0.0.1:<0.26956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:13] [ns_1@127.0.0.1:<0.26975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:13] [ns_1@127.0.0.1:<0.26930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:14] [ns_1@127.0.0.1:<0.27013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:14] [ns_1@127.0.0.1:<0.26957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:15] [ns_1@127.0.0.1:<0.26988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:15] [ns_1@127.0.0.1:<0.26933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27001.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27033.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:16] [ns_1@127.0.0.1:<0.27023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:16] [ns_1@127.0.0.1:<0.26958.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:17] [ns_1@127.0.0.1:<0.27004.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:17] [ns_1@127.0.0.1:<0.26935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:18] [ns_1@127.0.0.1:<0.27036.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:18] [ns_1@127.0.0.1:<0.26968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:19] [ns_1@127.0.0.1:<0.27055.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:19] [ns_1@127.0.0.1:<0.27017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:20] [ns_1@127.0.0.1:<0.27049.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:20] [ns_1@127.0.0.1:<0.27055.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27033.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27063.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:21] [ns_1@127.0.0.1:<0.27002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:21] [ns_1@127.0.0.1:<0.27028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:21] [ns_1@127.0.0.1:<0.27055.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:22] [ns_1@127.0.0.1:<0.27009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:22] [ns_1@127.0.0.1:<0.26986.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:22] [ns_1@127.0.0.1:<0.27055.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:23] [ns_1@127.0.0.1:<0.27015.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:23] [ns_1@127.0.0.1:<0.27042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:42:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27055.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:42:24] [ns_1@127.0.0.1:<0.27021.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:24] [ns_1@127.0.0.1:<0.27064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:25] [ns_1@127.0.0.1:<0.27026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:25] [ns_1@127.0.0.1:<0.26937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27063.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27098.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:26] [ns_1@127.0.0.1:<0.27034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:27] [ns_1@127.0.0.1:<0.27079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:27] [ns_1@127.0.0.1:<0.27081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:28] [ns_1@127.0.0.1:<0.27087.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:28] [ns_1@127.0.0.1:<0.27089.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:29] [ns_1@127.0.0.1:<0.27091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:29] [ns_1@127.0.0.1:<0.27093.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:29] [ns_1@127.0.0.1:<0.27120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:30] [ns_1@127.0.0.1:<0.27099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:30] [ns_1@127.0.0.1:<0.27102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:30] [ns_1@127.0.0.1:<0.27120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27098.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27130.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:31] [ns_1@127.0.0.1:<0.27066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:31] [ns_1@127.0.0.1:<0.26980.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:31] [ns_1@127.0.0.1:<0.27120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:32] [ns_1@127.0.0.1:<0.27058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:32] [ns_1@127.0.0.1:<0.27114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:32] [ns_1@127.0.0.1:<0.27120.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:33] [ns_1@127.0.0.1:<0.27106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:42:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27120.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:42:33] [ns_1@127.0.0.1:<0.26995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:34] [ns_1@127.0.0.1:<0.27076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:34] [ns_1@127.0.0.1:<0.27038.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:35] [ns_1@127.0.0.1:<0.27116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:35] [ns_1@127.0.0.1:<0.27071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27130.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} 
started: [{pid,<0.27165.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:36] [ns_1@127.0.0.1:<0.27112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:36] [ns_1@127.0.0.1:<0.27051.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:37] [ns_1@127.0.0.1:<0.27133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:37] [ns_1@127.0.0.1:<0.27110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:38] [ns_1@127.0.0.1:<0.27126.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:38] [ns_1@127.0.0.1:<0.27131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:39] [ns_1@127.0.0.1:<0.27151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:39] [ns_1@127.0.0.1:<0.27188.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:39] [ns_1@127.0.0.1:<0.27124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:40] [ns_1@127.0.0.1:<0.27142.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:40] [ns_1@127.0.0.1:<0.27188.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:40] [ns_1@127.0.0.1:<0.27146.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27165.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:42:41] [ns_1@127.0.0.1:<0.27188.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 2:42:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27199.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:42:42] [ns_1@127.0.0.1:<0.27188.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:43] [ns_1@127.0.0.1:<0.27157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:43] [ns_1@127.0.0.1:<0.27162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, 
list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:42:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27188.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:42:44] [ns_1@127.0.0.1:<0.27140.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:44] [ns_1@127.0.0.1:<0.27155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:45] [ns_1@127.0.0.1:<0.27173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:45] [ns_1@127.0.0.1:<0.27177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27199.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27226.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:46] [ns_1@127.0.0.1:<0.27153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:46] [ns_1@127.0.0.1:<0.27168.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:47] [ns_1@127.0.0.1:<0.27184.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:47] [ns_1@127.0.0.1:<0.27191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:48] [ns_1@127.0.0.1:<0.27179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:48] [ns_1@127.0.0.1:<0.27182.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:49] [ns_1@127.0.0.1:<0.27209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:49] [ns_1@127.0.0.1:<0.27248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:49] [ns_1@127.0.0.1:<0.27047.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:50] [ns_1@127.0.0.1:<0.27216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:50] [ns_1@127.0.0.1:<0.27248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:50] [ns_1@127.0.0.1:<0.27207.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27226.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27260.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:51] [ns_1@127.0.0.1:<0.27223.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:51] [ns_1@127.0.0.1:<0.27248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:51] [ns_1@127.0.0.1:<0.27214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:52] [ns_1@127.0.0.1:<0.27229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:52] [ns_1@127.0.0.1:<0.27248.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:52] [ns_1@127.0.0.1:<0.27219.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:42:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27248.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:42:55] [ns_1@127.0.0.1:<0.27233.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:55] [ns_1@127.0.0.1:<0.27238.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:42:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27260.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:42:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27287.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:42:56] [ns_1@127.0.0.1:<0.27240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:56] [ns_1@127.0.0.1:<0.27242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:57] [ns_1@127.0.0.1:<0.27244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:57] [ns_1@127.0.0.1:<0.27251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:58] [ns_1@127.0.0.1:<0.27227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:58] [ns_1@127.0.0.1:<0.27196.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:42:59] [ns_1@127.0.0.1:<0.27261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:42:59] [ns_1@127.0.0.1:<0.27310.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:42:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,754977,142315}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38195320}, {processes,9911168}, {processes_used,8285688}, {system,28284152}, {atom,1306681}, {atom_used,1284164}, {binary,488184}, {code,12859877}, {ets,2462752}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,5998}, {memory_data,{4040077312,4014403584,{<0.18771.0>,642888}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24568 kB\nBuffers: 63572 kB\nCached: 3528316 kB\nSwapCached: 0 kB\nActive: 311736 kB\nInactive: 3440892 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24568 
kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 160752 kB\nMapped: 24872 kB\nSlab: 134368 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582772 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3612995584}, {buffered_memory,65097728}, {free_memory,25157632}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{5993879,0}}, {context_switches,{1703841,0}}, {garbage_collection,{903907,1278423124,0}}, {io,{{input,27622148},{output,66317316}}}, {reductions,{359338557,578804}}, {run_queue,0}, {runtime,{76210,180}}]}]}] [stats:error] [2012-03-26 2:42:59] [ns_1@127.0.0.1:<0.27266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:00] [ns_1@127.0.0.1:<0.27166.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:00] [ns_1@127.0.0.1:<0.27310.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:00] [ns_1@127.0.0.1:<0.27257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27287.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:01] [ns_1@127.0.0.1:<0.27284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:01] [ns_1@127.0.0.1:<0.27310.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:01] [ns_1@127.0.0.1:<0.27288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:02] [ns_1@127.0.0.1:<0.27193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:02] [ns_1@127.0.0.1:<0.27310.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:02] [ns_1@127.0.0.1:<0.27274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:03] [ns_1@127.0.0.1:<0.27299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with 
reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:43:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27310.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:43:03] [ns_1@127.0.0.1:<0.27301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:04] [ns_1@127.0.0.1:<0.27271.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:04] [ns_1@127.0.0.1:<0.27280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:05] [ns_1@127.0.0.1:<0.27314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:05] [ns_1@127.0.0.1:<0.27316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27322.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27357.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:06] [ns_1@127.0.0.1:<0.27290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:06] [ns_1@127.0.0.1:<0.27295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:09] [ns_1@127.0.0.1:<0.27372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:10] [ns_1@127.0.0.1:<0.27372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27357.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 2:43:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27377.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:43:11] [ns_1@127.0.0.1:<0.27372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:12] [ns_1@127.0.0.1:<0.27372.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:43:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27372.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:43:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27377.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27392.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:16] [ns_1@127.0.0.1:<0.27305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:17] [ns_1@127.0.0.1:<0.27328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:17] [ns_1@127.0.0.1:<0.27332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:18] [ns_1@127.0.0.1:<0.27303.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:18] [ns_1@127.0.0.1:<0.27323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:19] [ns_1@127.0.0.1:<0.27343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:19] 
[ns_1@127.0.0.1:<0.27412.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:20] [ns_1@127.0.0.1:<0.27253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:20] [ns_1@127.0.0.1:<0.27319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:20] [ns_1@127.0.0.1:<0.27412.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27392.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27422.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:21] [ns_1@127.0.0.1:<0.27338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:21] [ns_1@127.0.0.1:<0.27354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:21] [ns_1@127.0.0.1:<0.27412.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:22] [ns_1@127.0.0.1:<0.27345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:22] [ns_1@127.0.0.1:<0.27336.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:22] [ns_1@127.0.0.1:<0.27412.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:23] [ns_1@127.0.0.1:<0.27349.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:23] [ns_1@127.0.0.1:<0.27402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:43:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27412.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: 
false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:43:24] [ns_1@127.0.0.1:<0.27358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:25] [ns_1@127.0.0.1:<0.27397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:25] [ns_1@127.0.0.1:<0.27415.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27422.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27455.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:26] [ns_1@127.0.0.1:<0.27404.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:26] [ns_1@127.0.0.1:<0.27347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:27] [ns_1@127.0.0.1:<0.27408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:27] [ns_1@127.0.0.1:<0.27430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:28] [ns_1@127.0.0.1:<0.27417.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:28] [ns_1@127.0.0.1:<0.27393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:29] [ns_1@127.0.0.1:<0.27425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:29] [ns_1@127.0.0.1:<0.27477.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:29] [ns_1@127.0.0.1:<0.27446.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:30] [ns_1@127.0.0.1:<0.27435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:30] [ns_1@127.0.0.1:<0.27477.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:30] [ns_1@127.0.0.1:<0.27360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27455.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 2:43:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27489.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:31] [ns_1@127.0.0.1:<0.27440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:31] [ns_1@127.0.0.1:<0.27477.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:31] [ns_1@127.0.0.1:<0.27456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:32] [ns_1@127.0.0.1:<0.27458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:32] [ns_1@127.0.0.1:<0.27477.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:32] [ns_1@127.0.0.1:<0.27406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:43:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27477.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [error_logger:error] [2012-03-26 2:43:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27489.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27512.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:36] [ns_1@127.0.0.1:<0.27469.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:36] [ns_1@127.0.0.1:<0.27471.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:37] 
[ns_1@127.0.0.1:<0.27423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:37] [ns_1@127.0.0.1:<0.27452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:38] [ns_1@127.0.0.1:<0.27483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:38] [ns_1@127.0.0.1:<0.27486.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:39] [ns_1@127.0.0.1:<0.27438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:39] [ns_1@127.0.0.1:<0.27467.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:39] [ns_1@127.0.0.1:<0.27537.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:40] [ns_1@127.0.0.1:<0.27499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:40] [ns_1@127.0.0.1:<0.27503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:40] [ns_1@127.0.0.1:<0.27537.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27512.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27546.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:41] [ns_1@127.0.0.1:<0.27448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:41] [ns_1@127.0.0.1:<0.27481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:41] [ns_1@127.0.0.1:<0.27537.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:42] [ns_1@127.0.0.1:<0.27515.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:42] [ns_1@127.0.0.1:<0.27517.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:42] [ns_1@127.0.0.1:<0.27537.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:43] [ns_1@127.0.0.1:<0.27463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:43] [ns_1@127.0.0.1:<0.27495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:43:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27537.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:43:44] [ns_1@127.0.0.1:<0.27528.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:44] [ns_1@127.0.0.1:<0.27531.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:45] [ns_1@127.0.0.1:<0.27473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:45] [ns_1@127.0.0.1:<0.27513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27546.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27581.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:46] [ns_1@127.0.0.1:<0.27542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:46] [ns_1@127.0.0.1:<0.27547.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:47] [ns_1@127.0.0.1:<0.27490.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:47] [ns_1@127.0.0.1:<0.27526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:48] [ns_1@127.0.0.1:<0.27559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:48] [ns_1@127.0.0.1:<0.27562.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:49] [ns_1@127.0.0.1:<0.27522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:49] [ns_1@127.0.0.1:<0.27540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:43:49] [ns_1@127.0.0.1:<0.27605.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:50] [ns_1@127.0.0.1:<0.27605.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:51] [ns_1@127.0.0.1:<0.27605.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:50] [ns_1@127.0.0.1:<0.27571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27581.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:52] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27613.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:43:52] [ns_1@127.0.0.1:<0.27605.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:52] [ns_1@127.0.0.1:<0.27574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:53] [ns_1@127.0.0.1:<0.27533.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:43:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27605.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:43:53] [ns_1@127.0.0.1:<0.27555.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:54] [ns_1@127.0.0.1:<0.27584.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:43:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} 
Offender: [{pid,<0.27613.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:43:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27636.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:43:56] [ns_1@127.0.0.1:<0.27597.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:57] [ns_1@127.0.0.1:<0.27599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:57] [ns_1@127.0.0.1:<0.27601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:58] [ns_1@127.0.0.1:<0.27608.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:58] [ns_1@127.0.0.1:<0.27621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:59] [ns_1@127.0.0.1:<0.27623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:43:59] [ns_1@127.0.0.1:<0.27629.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:43:59] [ns_1@127.0.0.1:<0.27659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:43:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755037,173623}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38457944}, {processes,10197128}, {processes_used,8571648}, {system,28260816}, {atom,1306681}, {atom_used,1284164}, {binary,485960}, {code,12859877}, {ets,2434736}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6058}, {memory_data,{4040077312,4015284224,{<0.18771.0>,647112}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24072 kB\nBuffers: 63640 kB\nCached: 3528480 kB\nSwapCached: 0 kB\nActive: 311832 kB\nInactive: 3441048 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24072 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 68 kB\nWriteback: 0 kB\nAnonPages: 160756 kB\nMapped: 24872 kB\nSlab: 134360 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582772 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 
0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613163520}, {buffered_memory,65167360}, {free_memory,24649728}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6053910,1}}, {context_switches,{1715286,0}}, {garbage_collection,{910305,1287264428,0}}, {io,{{input,27652680},{output,66692580}}}, {reductions,{361517155,558393}}, {run_queue,0}, {runtime,{76840,180}}]}]}] [stats:error] [2012-03-26 2:44:00] [ns_1@127.0.0.1:<0.27631.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:00] [ns_1@127.0.0.1:<0.27588.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:00] [ns_1@127.0.0.1:<0.27659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27636.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27669.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:01] [ns_1@127.0.0.1:<0.27549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:01] [ns_1@127.0.0.1:<0.27569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:01] [ns_1@127.0.0.1:<0.27659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:02] [ns_1@127.0.0.1:<0.27637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:02] [ns_1@127.0.0.1:<0.27659.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:02] [ns_1@127.0.0.1:<0.27642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:03] [ns_1@127.0.0.1:<0.27564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:44:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27659.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function 
gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:44:03] [ns_1@127.0.0.1:<0.27582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:04] [ns_1@127.0.0.1:<0.27650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:04] [ns_1@127.0.0.1:<0.27652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:05] [ns_1@127.0.0.1:<0.27576.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:05] [ns_1@127.0.0.1:<0.27595.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27669.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27704.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:06] [ns_1@127.0.0.1:<0.27665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:06] [ns_1@127.0.0.1:<0.27670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:07] [ns_1@127.0.0.1:<0.27590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:07] [ns_1@127.0.0.1:<0.27648.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:08] [ns_1@127.0.0.1:<0.27682.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:08] [ns_1@127.0.0.1:<0.27685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:09] [ns_1@127.0.0.1:<0.27644.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:09] [ns_1@127.0.0.1:<0.27729.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:09] [ns_1@127.0.0.1:<0.27663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:10] [ns_1@127.0.0.1:<0.27694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:10] 
[ns_1@127.0.0.1:<0.27729.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:10] [ns_1@127.0.0.1:<0.27696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27704.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:10] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27740.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:11] [ns_1@127.0.0.1:<0.27654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:11] [ns_1@127.0.0.1:<0.27729.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:11] [ns_1@127.0.0.1:<0.27679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:12] [ns_1@127.0.0.1:<0.27707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:12] [ns_1@127.0.0.1:<0.27729.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:12] [ns_1@127.0.0.1:<0.27712.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:13] [ns_1@127.0.0.1:<0.27674.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:44:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27729.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:44:13] [ns_1@127.0.0.1:<0.27692.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:14] [ns_1@127.0.0.1:<0.27721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:14] 
[ns_1@127.0.0.1:<0.27723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:15] [ns_1@127.0.0.1:<0.27690.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27740.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27773.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:16] [ns_1@127.0.0.1:<0.27705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:16] [ns_1@127.0.0.1:<0.27737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:17] [ns_1@127.0.0.1:<0.27741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:17] [ns_1@127.0.0.1:<0.27756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:17] [ns_1@127.0.0.1:<0.27768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:17] [ns_1@127.0.0.1:<0.27701.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:18] [ns_1@127.0.0.1:<0.27718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:18] [ns_1@127.0.0.1:<0.27754.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:19] [ns_1@127.0.0.1:<0.27786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:19] [ns_1@127.0.0.1:<0.27716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:19] [ns_1@127.0.0.1:<0.27801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:20] [ns_1@127.0.0.1:<0.27734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:20] [ns_1@127.0.0.1:<0.27766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:20] [ns_1@127.0.0.1:<0.27801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27773.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27811.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:21] [ns_1@127.0.0.1:<0.27797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:21] [ns_1@127.0.0.1:<0.27732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:21] [ns_1@127.0.0.1:<0.27801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:22] [ns_1@127.0.0.1:<0.27751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:22] [ns_1@127.0.0.1:<0.27778.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:22] [ns_1@127.0.0.1:<0.27801.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:23] [ns_1@127.0.0.1:<0.27814.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:23] [ns_1@127.0.0.1:<0.27747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:44:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27801.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:44:24] [ns_1@127.0.0.1:<0.27763.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:24] [ns_1@127.0.0.1:<0.27780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:25] [ns_1@127.0.0.1:<0.27829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:25] [ns_1@127.0.0.1:<0.27761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27811.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27846.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:26] [ns_1@127.0.0.1:<0.27776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:26] [ns_1@127.0.0.1:<0.27782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:27] [ns_1@127.0.0.1:<0.27841.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:27] [ns_1@127.0.0.1:<0.27774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:28] [ns_1@127.0.0.1:<0.27793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:28] [ns_1@127.0.0.1:<0.27795.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:29] [ns_1@127.0.0.1:<0.27856.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:29] [ns_1@127.0.0.1:<0.27791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:29] [ns_1@127.0.0.1:<0.27870.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:30] [ns_1@127.0.0.1:<0.27806.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:30] [ns_1@127.0.0.1:<0.27870.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:30] [ns_1@127.0.0.1:<0.27812.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27846.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:44:31] [ns_1@127.0.0.1:<0.27870.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:32] [ns_1@127.0.0.1:<0.27824.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:32] [ns_1@127.0.0.1:<0.27870.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:32] [ns_1@127.0.0.1:<0.27827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:33] [ns_1@127.0.0.1:<0.27866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:44:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27870.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:44:33] [ns_1@127.0.0.1:<0.27804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:34] [ns_1@127.0.0.1:<0.27837.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:34] [ns_1@127.0.0.1:<0.27839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:35] [ns_1@127.0.0.1:<0.27897.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:35] [ns_1@127.0.0.1:<0.27819.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27880.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27911.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:36] [ns_1@127.0.0.1:<0.27849.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:36] [ns_1@127.0.0.1:<0.27854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:37] [ns_1@127.0.0.1:<0.27908.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:37] [ns_1@127.0.0.1:<0.27835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:38] [ns_1@127.0.0.1:<0.27862.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:38] [ns_1@127.0.0.1:<0.27864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:39] [ns_1@127.0.0.1:<0.27923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:39] [ns_1@127.0.0.1:<0.27934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:39] [ns_1@127.0.0.1:<0.27847.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:40] [ns_1@127.0.0.1:<0.27889.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:40] [ns_1@127.0.0.1:<0.27934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27911.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:40] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27943.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:41] [ns_1@127.0.0.1:<0.27892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:41] [ns_1@127.0.0.1:<0.27934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:41] [ns_1@127.0.0.1:<0.27937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:42] [ns_1@127.0.0.1:<0.27899.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:42] [ns_1@127.0.0.1:<0.27934.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:42] [ns_1@127.0.0.1:<0.27901.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:43] [ns_1@127.0.0.1:<0.27903.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:44:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.27934.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in 
call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:44:43] [ns_1@127.0.0.1:<0.27860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:44] [ns_1@127.0.0.1:<0.27912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:44] [ns_1@127.0.0.1:<0.27914.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:45] [ns_1@127.0.0.1:<0.27919.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:45] [ns_1@127.0.0.1:<0.27874.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27943.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27978.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:46] [ns_1@127.0.0.1:<0.27925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:46] [ns_1@127.0.0.1:<0.27928.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:47] [ns_1@127.0.0.1:<0.27930.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:47] [ns_1@127.0.0.1:<0.27886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:48] [ns_1@127.0.0.1:<0.27876.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:48] [ns_1@127.0.0.1:<0.27944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:49] [ns_1@127.0.0.1:<0.27948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:49] [ns_1@127.0.0.1:<0.28000.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:49] [ns_1@127.0.0.1:<0.27954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:50] [ns_1@127.0.0.1:<0.27939.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:50] [ns_1@127.0.0.1:<0.28000.1>:ns_janitor:wait_for_memcached:278] 
Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:50] [ns_1@127.0.0.1:<0.27959.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.27978.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28012.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:51] [ns_1@127.0.0.1:<0.27964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:51] [ns_1@127.0.0.1:<0.28000.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:51] [ns_1@127.0.0.1:<0.27966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:52] [ns_1@127.0.0.1:<0.27957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:52] [ns_1@127.0.0.1:<0.28000.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:52] [ns_1@127.0.0.1:<0.27971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:53] [ns_1@127.0.0.1:<0.27975.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:44:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28000.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:44:53] [ns_1@127.0.0.1:<0.27979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:54] [ns_1@127.0.0.1:<0.27969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:54] [ns_1@127.0.0.1:<0.27985.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:55] [ns_1@127.0.0.1:<0.27990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:44:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28012.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:44:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28045.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:44:56] [ns_1@127.0.0.1:<0.27992.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:56] [ns_1@127.0.0.1:<0.27981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:57] [ns_1@127.0.0.1:<0.27996.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:57] [ns_1@127.0.0.1:<0.28003.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:58] [ns_1@127.0.0.1:<0.28005.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:58] [ns_1@127.0.0.1:<0.27994.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:59] [ns_1@127.0.0.1:<0.28013.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:44:59] [ns_1@127.0.0.1:<0.28018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:44:59] [ns_1@127.0.0.1:<0.28075.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:44:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755097,204674}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38386288}, {processes,10088872}, {processes_used,8463392}, {system,28297416}, {atom,1306681}, {atom_used,1284164}, {binary,487304}, {code,12859877}, {ets,2463616}]}, {system_stats, [{cpu_utilization_rate,25.699745547073793}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6118}, {memory_data,{4040077312,4015411200,{<0.18771.0>,651336}}}, {disk_data, 
[{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 24072 kB\nBuffers: 63712 kB\nCached: 3528624 kB\nSwapCached: 0 kB\nActive: 311956 kB\nInactive: 3441144 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 24072 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 100 kB\nWriteback: 0 kB\nAnonPages: 160764 kB\nMapped: 24872 kB\nSlab: 134324 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582772 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3613310976}, {buffered_memory,65241088}, {free_memory,24649728}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6113942,1}}, {context_switches,{1727981,0}}, {garbage_collection,{917082,1297961176,0}}, {io,{{input,27683263},{output,67122695}}}, {reductions,{364003584,612982}}, {run_queue,0}, {runtime,{77630,190}}]}]}] [stats:error] [2012-03-26 2:45:00] [ns_1@127.0.0.1:<0.28023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:00] [ns_1@127.0.0.1:<0.28009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:00] [ns_1@127.0.0.1:<0.28075.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28045.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:45:01] [ns_1@127.0.0.1:<0.28075.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:info] [2012-03-26 2:45:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28085.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:02] [ns_1@127.0.0.1:<0.28026.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:02] [ns_1@127.0.0.1:<0.28075.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:03] [ns_1@127.0.0.1:<0.28028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:03] [ns_1@127.0.0.1:<0.28034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 
2:45:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28075.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:45:04] [ns_1@127.0.0.1:<0.28036.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:04] [ns_1@127.0.0.1:<0.28038.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:05] [ns_1@127.0.0.1:<0.28040.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:05] [ns_1@127.0.0.1:<0.28046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28085.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28122.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:06] [ns_1@127.0.0.1:<0.28048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:06] [ns_1@127.0.0.1:<0.28051.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:07] [ns_1@127.0.0.1:<0.28055.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:07] [ns_1@127.0.0.1:<0.28059.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:08] [ns_1@127.0.0.1:<0.28061.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:08] [ns_1@127.0.0.1:<0.28063.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:09] [ns_1@127.0.0.1:<0.28065.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:09] [ns_1@127.0.0.1:<0.28079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:09] [ns_1@127.0.0.1:<0.28149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:45:10] [ns_1@127.0.0.1:<0.28081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:10] [ns_1@127.0.0.1:<0.28103.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:10] [ns_1@127.0.0.1:<0.28149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28122.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28158.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:11] [ns_1@127.0.0.1:<0.28105.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:11] [ns_1@127.0.0.1:<0.28110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:11] [ns_1@127.0.0.1:<0.28149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:12] [ns_1@127.0.0.1:<0.28099.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:12] [ns_1@127.0.0.1:<0.28114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:12] [ns_1@127.0.0.1:<0.28149.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:13] [ns_1@127.0.0.1:<0.28116.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:13] [ns_1@127.0.0.1:<0.28123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:45:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28149.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:45:14] 
[ns_1@127.0.0.1:<0.28112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:14] [ns_1@127.0.0.1:<0.28127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:16] [ns_1@127.0.0.1:<0.28136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28158.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28191.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:16] [ns_1@127.0.0.1:<0.28125.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:17] [ns_1@127.0.0.1:<0.28141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:17] [ns_1@127.0.0.1:<0.28132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:17] [ns_1@127.0.0.1:<0.28143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:17] [ns_1@127.0.0.1:<0.28161.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:18] [ns_1@127.0.0.1:<0.28152.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:18] [ns_1@127.0.0.1:<0.28138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:19] [ns_1@127.0.0.1:<0.28159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:19] [ns_1@127.0.0.1:<0.28176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:19] [ns_1@127.0.0.1:<0.28217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:20] [ns_1@127.0.0.1:<0.28169.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:20] [ns_1@127.0.0.1:<0.28154.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:20] [ns_1@127.0.0.1:<0.28217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28191.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:21] [ns_1@127.0.0.1:<0.28174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:21] [ns_1@127.0.0.1:<0.28188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:21] [ns_1@127.0.0.1:<0.28217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:22] [ns_1@127.0.0.1:<0.28181.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:22] [ns_1@127.0.0.1:<0.28171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:22] [ns_1@127.0.0.1:<0.28217.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:23] [ns_1@127.0.0.1:<0.28198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:23] [ns_1@127.0.0.1:<0.28207.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:45:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28217.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:45:24] [ns_1@127.0.0.1:<0.28192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:24] [ns_1@127.0.0.1:<0.28183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:25] [ns_1@127.0.0.1:<0.28200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:25] [ns_1@127.0.0.1:<0.28220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, 
['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28227.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:26] [ns_1@127.0.0.1:<0.28209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:26] [ns_1@127.0.0.1:<0.28194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:27] [ns_1@127.0.0.1:<0.28202.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:27] [ns_1@127.0.0.1:<0.28235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:28] [ns_1@127.0.0.1:<0.28222.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:28] [ns_1@127.0.0.1:<0.28211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:29] [ns_1@127.0.0.1:<0.28213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:29] [ns_1@127.0.0.1:<0.28251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:29] [ns_1@127.0.0.1:<0.28286.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:30] [ns_1@127.0.0.1:<0.28240.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:30] [ns_1@127.0.0.1:<0.28228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:30] [ns_1@127.0.0.1:<0.28286.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28262.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28296.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:31] [ns_1@127.0.0.1:<0.28230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:31] [ns_1@127.0.0.1:<0.28263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:31] [ns_1@127.0.0.1:<0.28286.1>:ns_janitor:wait_for_memcached:278] Waiting for 
"default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:32] [ns_1@127.0.0.1:<0.28253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:32] [ns_1@127.0.0.1:<0.28286.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:32] [ns_1@127.0.0.1:<0.28243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:33] [ns_1@127.0.0.1:<0.28245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:45:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28286.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:45:33] [ns_1@127.0.0.1:<0.28276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:34] [ns_1@127.0.0.1:<0.28265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:34] [ns_1@127.0.0.1:<0.28255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:35] [ns_1@127.0.0.1:<0.28257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:35] [ns_1@127.0.0.1:<0.28290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28296.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28331.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:36] [ns_1@127.0.0.1:<0.28278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:36] [ns_1@127.0.0.1:<0.28268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:37] [ns_1@127.0.0.1:<0.28272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:37] [ns_1@127.0.0.1:<0.28306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:38] [ns_1@127.0.0.1:<0.28292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:38] [ns_1@127.0.0.1:<0.28280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:39] [ns_1@127.0.0.1:<0.28282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:39] [ns_1@127.0.0.1:<0.28354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:39] [ns_1@127.0.0.1:<0.28319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:40] [ns_1@127.0.0.1:<0.28309.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:40] [ns_1@127.0.0.1:<0.28354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:40] [ns_1@127.0.0.1:<0.28297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28331.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28365.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:41] [ns_1@127.0.0.1:<0.28299.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:41] [ns_1@127.0.0.1:<0.28354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:41] [ns_1@127.0.0.1:<0.28332.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:42] [ns_1@127.0.0.1:<0.28321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:42] [ns_1@127.0.0.1:<0.28354.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:42] [ns_1@127.0.0.1:<0.28312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:43] [ns_1@127.0.0.1:<0.28317.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, 
list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:45:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28354.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:45:43] [ns_1@127.0.0.1:<0.28345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:44] [ns_1@127.0.0.1:<0.28334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:44] [ns_1@127.0.0.1:<0.28323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:45] [ns_1@127.0.0.1:<0.28328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:45] [ns_1@127.0.0.1:<0.28359.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28365.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28400.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:46] [ns_1@127.0.0.1:<0.28348.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:47] [ns_1@127.0.0.1:<0.28339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:47] [ns_1@127.0.0.1:<0.28343.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:48] [ns_1@127.0.0.1:<0.28376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:48] [ns_1@127.0.0.1:<0.28362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:49] [ns_1@127.0.0.1:<0.28350.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:49] [ns_1@127.0.0.1:<0.28357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:49] [ns_1@127.0.0.1:<0.28422.1>:ns_janitor:wait_for_memcached:278] 
Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:50] [ns_1@127.0.0.1:<0.28388.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:50] [ns_1@127.0.0.1:<0.28379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:50] [ns_1@127.0.0.1:<0.28422.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28400.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28432.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:51] [ns_1@127.0.0.1:<0.28366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:51] [ns_1@127.0.0.1:<0.28372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:51] [ns_1@127.0.0.1:<0.28422.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:52] [ns_1@127.0.0.1:<0.28401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:52] [ns_1@127.0.0.1:<0.28391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:52] [ns_1@127.0.0.1:<0.28422.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:53] [ns_1@127.0.0.1:<0.28381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:53] [ns_1@127.0.0.1:<0.28386.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:45:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28422.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 
reductions: 1912 neighbours: [stats:error] [2012-03-26 2:45:54] [ns_1@127.0.0.1:<0.28414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:54] [ns_1@127.0.0.1:<0.28403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:55] [ns_1@127.0.0.1:<0.28393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:55] [ns_1@127.0.0.1:<0.28397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:45:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28432.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:45:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28467.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:45:56] [ns_1@127.0.0.1:<0.28427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:56] [ns_1@127.0.0.1:<0.28416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:57] [ns_1@127.0.0.1:<0.28407.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:57] [ns_1@127.0.0.1:<0.28412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:58] [ns_1@127.0.0.1:<0.28445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:58] [ns_1@127.0.0.1:<0.28433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:59] [ns_1@127.0.0.1:<0.28418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:45:59] [ns_1@127.0.0.1:<0.28425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:45:59] [ns_1@127.0.0.1:<0.28492.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:45:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755157,242819}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38423128}, {processes,10155040}, {processes_used,8529560}, {system,28268088}, {atom,1306681}, {atom_used,1284164}, {binary,478376}, {code,12859877}, {ets,2436984}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, 
{os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6178}, {memory_data,{4040077312,4015427584,{<0.18771.0>,655560}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27848 kB\nBuffers: 63824 kB\nCached: 3524212 kB\nSwapCached: 0 kB\nActive: 312236 kB\nInactive: 3436900 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27848 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 161092 kB\nMapped: 24872 kB\nSlab: 134360 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 583120 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3608793088}, {buffered_memory,65355776}, {free_memory,28516352}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6173980,1}}, {context_switches,{1741473,0}}, {garbage_collection,{924180,1309097492,0}}, {io,{{input,27959122},{output,68073718}}}, {reductions,{366898556,604322}}, {run_queue,0}, {runtime,{78510,220}}]}]}] [stats:error] [2012-03-26 2:46:00] [ns_1@127.0.0.1:<0.28458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:00] [ns_1@127.0.0.1:<0.28448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:00] [ns_1@127.0.0.1:<0.28492.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28467.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28502.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:01] [ns_1@127.0.0.1:<0.28435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:01] [ns_1@127.0.0.1:<0.28492.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:02] [ns_1@127.0.0.1:<0.28440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:02] [ns_1@127.0.0.1:<0.28470.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:02] [ns_1@127.0.0.1:<0.28492.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:03] [ns_1@127.0.0.1:<0.28460.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:03] [ns_1@127.0.0.1:<0.28450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:46:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28492.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:46:04] [ns_1@127.0.0.1:<0.28456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:04] [ns_1@127.0.0.1:<0.28483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:05] [ns_1@127.0.0.1:<0.28473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:05] [ns_1@127.0.0.1:<0.28462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28502.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28535.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:06] [ns_1@127.0.0.1:<0.28468.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:06] [ns_1@127.0.0.1:<0.28498.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:07] [ns_1@127.0.0.1:<0.28485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:07] [ns_1@127.0.0.1:<0.28477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
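The cycle above keeps repeating while no process is registered as 'ns_memcached-default' on 'ns_1@127.0.0.1': each janitor run (ns_janitor:cleanup/2) and each hot_keys_keeper instance dies with a {noproc, {gen_server,call, ...}} exit, ns_orchestrator logs "Janitor run exited" and retries about every ten seconds in the entries above, and menelaus_sup restarts hot_keys_keeper immediately because the child is declared {restart_type,permanent}. The {noproc, {gen_server,call, [Target, Request, Timeout]}} shape is the standard exit that gen_server:call/3 raises when its target process does not exist. The module below is a minimal, hypothetical sketch (noproc_demo is not part of ns_server) that reproduces the same exit reason on any node where 'ns_memcached-default' is not registered:

-module(noproc_demo).   %% hypothetical demo module; not part of ns_server
-export([local_call/0, remote_call/0]).

%% Mirrors the hot_keys_keeper failure above: calling a locally registered
%% name that does not exist. gen_server:call/3 exits the caller with
%% {noproc,{gen_server,call,['ns_memcached-default',topkeys,30000]}}.
local_call() ->
    catch_noproc(fun() ->
        gen_server:call('ns_memcached-default', topkeys, 30000)
    end).

%% Mirrors the janitor failure above: the {Name, Node} addressing form used
%% for the list_vbuckets_prevstate call; the exit reason carries the same
%% {noproc,{gen_server,call,[{Name,Node},Request,Timeout]}} tuple.
remote_call() ->
    catch_noproc(fun() ->
        gen_server:call({'ns_memcached-default', node()},
                        list_vbuckets_prevstate, 30000)
    end).

%% Runs Fun and converts the exit a bare gen_server:call/3 would raise into
%% a return value, so the shape of the reason can be inspected in a shell.
catch_noproc(Fun) ->
    try Fun() of
        Reply -> {unexpected_reply, Reply}
    catch
        exit:{noproc, Detail} -> {would_have_crashed_with, {noproc, Detail}}
    end.

Calling noproc_demo:local_call() or noproc_demo:remote_call() returns {would_have_crashed_with, {noproc, ...}} immediately rather than after the 30000 ms timeout, since a missing registration is detected up front instead of by timeout.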
[stats:error] [2012-03-26 2:46:08] [ns_1@127.0.0.1:<0.28481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:08] [ns_1@127.0.0.1:<0.28516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:09] [ns_1@127.0.0.1:<0.28503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:09] [ns_1@127.0.0.1:<0.28487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:09] [ns_1@127.0.0.1:<0.28562.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:10] [ns_1@127.0.0.1:<0.28496.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:10] [ns_1@127.0.0.1:<0.28527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:10] [ns_1@127.0.0.1:<0.28562.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28535.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28571.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:11] [ns_1@127.0.0.1:<0.28518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:11] [ns_1@127.0.0.1:<0.28508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:11] [ns_1@127.0.0.1:<0.28562.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:12] [ns_1@127.0.0.1:<0.28512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:12] [ns_1@127.0.0.1:<0.28540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:12] [ns_1@127.0.0.1:<0.28562.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:13] [ns_1@127.0.0.1:<0.28529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:13] [ns_1@127.0.0.1:<0.28523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:13] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:46:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28562.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:46:14] [ns_1@127.0.0.1:<0.28525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:14] [ns_1@127.0.0.1:<0.28554.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:15] [ns_1@127.0.0.1:<0.28545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:15] [ns_1@127.0.0.1:<0.28536.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28571.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28606.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:16] [ns_1@127.0.0.1:<0.28538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:16] [ns_1@127.0.0.1:<0.28572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:17] [ns_1@127.0.0.1:<0.28556.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:17] [ns_1@127.0.0.1:<0.28549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:17] [ns_1@127.0.0.1:<0.28565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:17] [ns_1@127.0.0.1:<0.28580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:18] [ns_1@127.0.0.1:<0.28551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:18] [ns_1@127.0.0.1:<0.28587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:19] [ns_1@127.0.0.1:<0.28574.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:19] [ns_1@127.0.0.1:<0.28632.1>:ns_janitor:wait_for_memcached:278] 
Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:19] [ns_1@127.0.0.1:<0.28594.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:20] [ns_1@127.0.0.1:<0.28567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:20] [ns_1@127.0.0.1:<0.28632.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:20] [ns_1@127.0.0.1:<0.28599.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28606.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28644.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:21] [ns_1@127.0.0.1:<0.28589.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:21] [ns_1@127.0.0.1:<0.28632.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:21] [ns_1@127.0.0.1:<0.28607.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:22] [ns_1@127.0.0.1:<0.28584.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:22] [ns_1@127.0.0.1:<0.28632.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:22] [ns_1@127.0.0.1:<0.28611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:23] [ns_1@127.0.0.1:<0.28601.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:46:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28632.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 
reductions: 1912 neighbours: [stats:error] [2012-03-26 2:46:23] [ns_1@127.0.0.1:<0.28624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:24] [ns_1@127.0.0.1:<0.28596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:24] [ns_1@127.0.0.1:<0.28628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:25] [ns_1@127.0.0.1:<0.28615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:25] [ns_1@127.0.0.1:<0.28637.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28644.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28679.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:26] [ns_1@127.0.0.1:<0.28609.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:26] [ns_1@127.0.0.1:<0.28645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:27] [ns_1@127.0.0.1:<0.28617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:27] [ns_1@127.0.0.1:<0.28655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:28] [ns_1@127.0.0.1:<0.28626.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:28] [ns_1@127.0.0.1:<0.28660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:29] [ns_1@127.0.0.1:<0.28619.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:29] [ns_1@127.0.0.1:<0.28701.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:29] [ns_1@127.0.0.1:<0.28668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:30] [ns_1@127.0.0.1:<0.28640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:30] [ns_1@127.0.0.1:<0.28701.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:30] [ns_1@127.0.0.1:<0.28672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} 
Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28679.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28713.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:31] [ns_1@127.0.0.1:<0.28635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:31] [ns_1@127.0.0.1:<0.28701.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:31] [ns_1@127.0.0.1:<0.28680.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:32] [ns_1@127.0.0.1:<0.28658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:32] [ns_1@127.0.0.1:<0.28701.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:33] [ns_1@127.0.0.1:<0.28687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:33] [ns_1@127.0.0.1:<0.28650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:33] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:46:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28701.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:46:34] [ns_1@127.0.0.1:<0.28693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:34] [ns_1@127.0.0.1:<0.28670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:35] [ns_1@127.0.0.1:<0.28697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:35] [ns_1@127.0.0.1:<0.28666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: 
child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28713.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28746.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:36] [ns_1@127.0.0.1:<0.28707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:36] [ns_1@127.0.0.1:<0.28683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:37] [ns_1@127.0.0.1:<0.28714.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:37] [ns_1@127.0.0.1:<0.28676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:38] [ns_1@127.0.0.1:<0.28723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:38] [ns_1@127.0.0.1:<0.28695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:39] [ns_1@127.0.0.1:<0.28729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:39] [ns_1@127.0.0.1:<0.28691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:39] [ns_1@127.0.0.1:<0.28771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:40] [ns_1@127.0.0.1:<0.28736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:40] [ns_1@127.0.0.1:<0.28710.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:40] [ns_1@127.0.0.1:<0.28771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28746.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28780.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:41] [ns_1@127.0.0.1:<0.28740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:41] [ns_1@127.0.0.1:<0.28705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:41] 
[ns_1@127.0.0.1:<0.28771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:42] [ns_1@127.0.0.1:<0.28749.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:42] [ns_1@127.0.0.1:<0.28727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:42] [ns_1@127.0.0.1:<0.28771.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:43] [ns_1@127.0.0.1:<0.28756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:43] [ns_1@127.0.0.1:<0.28719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:43] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:46:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28771.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:46:44] [ns_1@127.0.0.1:<0.28762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:44] [ns_1@127.0.0.1:<0.28738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:45] [ns_1@127.0.0.1:<0.28767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:45] [ns_1@127.0.0.1:<0.28734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28780.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28815.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:46] [ns_1@127.0.0.1:<0.28776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:46] 
[ns_1@127.0.0.1:<0.28751.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:47] [ns_1@127.0.0.1:<0.28783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:47] [ns_1@127.0.0.1:<0.28747.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:48] [ns_1@127.0.0.1:<0.28793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:48] [ns_1@127.0.0.1:<0.28765.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:49] [ns_1@127.0.0.1:<0.28798.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:49] [ns_1@127.0.0.1:<0.28760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:49] [ns_1@127.0.0.1:<0.28839.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:50] [ns_1@127.0.0.1:<0.28805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:50] [ns_1@127.0.0.1:<0.28839.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:50] [ns_1@127.0.0.1:<0.28781.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28815.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28849.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:51] [ns_1@127.0.0.1:<0.28810.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:51] [ns_1@127.0.0.1:<0.28839.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:51] [ns_1@127.0.0.1:<0.28774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:52] [ns_1@127.0.0.1:<0.28818.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:52] [ns_1@127.0.0.1:<0.28839.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:52] [ns_1@127.0.0.1:<0.28796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:53] [ns_1@127.0.0.1:<0.28824.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:46:53] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" 
with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:46:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28839.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:46:53] [ns_1@127.0.0.1:<0.28789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:54] [ns_1@127.0.0.1:<0.28831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:54] [ns_1@127.0.0.1:<0.28808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:55] [ns_1@127.0.0.1:<0.28835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:55] [ns_1@127.0.0.1:<0.28803.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:46:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28849.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:46:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28884.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:46:56] [ns_1@127.0.0.1:<0.28845.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:56] [ns_1@127.0.0.1:<0.28820.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:57] [ns_1@127.0.0.1:<0.28855.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:57] [ns_1@127.0.0.1:<0.28816.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:58] [ns_1@127.0.0.1:<0.28863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:58] [ns_1@127.0.0.1:<0.28833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:46:59] [ns_1@127.0.0.1:<0.28871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 2:46:59] [ns_1@127.0.0.1:<0.28907.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:46:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755217,276337}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["Default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38471904}, {processes,10161784}, {processes_used,8536304}, {system,28310120}, {atom,1306681}, {atom_used,1284164}, {binary,485176}, {code,12859877}, {ets,2465664}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6239}, {memory_data,{4040077312,4011569152,{<0.18771.0>,659784}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27476 kB\nBuffers: 63912 kB\nCached: 3524372 kB\nSwapCached: 0 kB\nActive: 312004 kB\nInactive: 3437044 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27476 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 100 kB\nWriteback: 0 kB\nAnonPages: 160804 kB\nMapped: 24872 kB\nSlab: 134340 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580672 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3608956928}, {buffered_memory,65445888}, {free_memory,28135424}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6234012,1}}, {context_switches,{1754411,0}}, {garbage_collection,{930725,1320084918,0}}, {io,{{input,27989711},{output,68511839}}}, {reductions,{369395207,596040}}, {run_queue,0}, {runtime,{79310,200}}]}]}] [stats:error] [2012-03-26 2:47:00] [ns_1@127.0.0.1:<0.28829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:00] [ns_1@127.0.0.1:<0.28875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:00] [ns_1@127.0.0.1:<0.28907.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28884.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] 
[2012-03-26 2:47:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28917.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:01] [ns_1@127.0.0.1:<0.28850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:01] [ns_1@127.0.0.1:<0.28881.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:01] [ns_1@127.0.0.1:<0.28907.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:02] [ns_1@127.0.0.1:<0.28842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:02] [ns_1@127.0.0.1:<0.28888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:02] [ns_1@127.0.0.1:<0.28907.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:03] [ns_1@127.0.0.1:<0.28865.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:03] [ns_1@127.0.0.1:<0.28896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:03] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {noproc, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:47:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.28907.1> registered_name: [] exception exit: {noproc, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1912 neighbours: [stats:error] [2012-03-26 2:47:04] [ns_1@127.0.0.1:<0.28860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:04] [ns_1@127.0.0.1:<0.28900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:05] [ns_1@127.0.0.1:<0.28877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:05] [ns_1@127.0.0.1:<0.28911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28917.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.28952.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:06] [ns_1@127.0.0.1:<0.28873.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:06] [ns_1@127.0.0.1:<0.28918.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:07] [ns_1@127.0.0.1:<0.28892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:07] [ns_1@127.0.0.1:<0.28925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:08] [ns_1@127.0.0.1:<0.28885.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:08] [ns_1@127.0.0.1:<0.28933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:09] [ns_1@127.0.0.1:<0.28902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.28975.1>, supervisor, [single_bucket_sup]} [error_logger:error] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,ns_bucket_sup} Context: shutdown_error Reason: {{badmatch,{error,shutdown}}, [{single_bucket_sup,'-start_link/1-fun-0-',2}]} Offender: [{pid,<0.18768.0>}, {name,{per_bucket_sup,"Default"}}, {mfargs,{single_bucket_sup,start_link,["Default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.28975.1>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:error] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_msg:76] Error in process <0.18768.0> on node 'ns_1@127.0.0.1' with exit value: {{badmatch,{error,shutdown}},[{single_bucket_sup,'-start_link/1-fun-0-',2}]} [ns_server:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:700] Created bucket "default" with config string 
"ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=2153775104;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28978.1>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28985.1>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28986.1>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28987.1>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28988.1>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28989.1>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28991.1>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28993.1>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] 
[error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.28994.1>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.28977.1>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.28995.1>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 
2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 
2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 146 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: {not_found, no_db_file} [couchdb:info] 
[2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found, no_db_file} 
[couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found, 
no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: 
{not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in 
<<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 
252 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found, no_db_file} [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found, no_db_file} [views:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [couchdb:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found, no_db_file} [views:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:<0.28975.1>:single_bucket_sup:top_loop:27] Delegating exit {'EXIT', <0.385.0>, shutdown} to child supervisor: <0.28976.1> [error_logger:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.29009.1>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [user:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:348] Shutting down bucket "default" on 'ns_1@127.0.0.1' for server shutdown [ns_server:info] [2012-03-26 2:47:09] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Had to wait 1h:42m:49s for shutdown memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.366.0>: Failed to load mutation log, falling back to key dump memcached<0.366.0>: metadata loaded in 454 usec memcached<0.366.0>: warmup completed in 3980 usec [stats:error] [2012-03-26 2:47:09] [ns_1@127.0.0.1:<0.28940.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:10] [ns_1@127.0.0.1:<0.28898.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:10] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Shutting down tap connections! 
[stats:error] [2012-03-26 2:47:10] [ns_1@127.0.0.1:<0.28944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:11] [ns_1@127.0.0.1:<0.28920.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:11] [ns_1@127.0.0.1:<0.28953.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.360.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29291.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:12] [ns_1@127.0.0.1:<0.28913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:12] [ns_1@127.0.0.1:<0.28957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:13] [ns_1@127.0.0.1:<0.28935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:13] [ns_1@127.0.0.1:<0.28966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:14] [ns_1@127.0.0.1:<0.28929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:14] [ns_1@127.0.0.1:<0.28971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:14] [ns_1@127.0.0.1:<0.29276.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29291.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29309.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:15] [ns_1@127.0.0.1:<0.28946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:15] [ns_1@127.0.0.1:<0.29279.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:47:16] [ns_1@127.0.0.1:<0.28942.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:16] [ns_1@127.0.0.1:<0.29284.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:17] [ns_1@127.0.0.1:<0.28962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:17] [ns_1@127.0.0.1:<0.29292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:17] [ns_1@127.0.0.1:<0.29302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:17] [ns_1@127.0.0.1:<0.29317.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29309.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29332.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:17] [ns_1@127.0.0.1:<0.28955.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:17] [ns_1@127.0.0.1:<0.28968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:18] [ns_1@127.0.0.1:<0.29281.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:18] [ns_1@127.0.0.1:<0.29296.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:19] [ns_1@127.0.0.1:<0.29270.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29276.1>} [stats:error] [2012-03-26 2:47:19] [ns_1@127.0.0.1:<0.29337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:20] [ns_1@127.0.0.1:<0.29294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:20] [ns_1@127.0.0.1:<0.29276.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29332.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29352.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:20] [ns_1@127.0.0.1:<0.29310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:21] [ns_1@127.0.0.1:<0.29286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:21] [ns_1@127.0.0.1:<0.29349.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:22] [ns_1@127.0.0.1:<0.29306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:22] [ns_1@127.0.0.1:<0.29321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:23] [ns_1@127.0.0.1:<0.29298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29352.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29371.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:23] [ns_1@127.0.0.1:<0.29362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:24] [ns_1@127.0.0.1:<0.29319.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:24] [ns_1@127.0.0.1:<0.29341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:25] [ns_1@127.0.0.1:<0.29312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:25] [ns_1@127.0.0.1:<0.29374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:26] [ns_1@127.0.0.1:<0.29339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:26] [ns_1@127.0.0.1:<0.29276.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, 
{latest,minute,1}]}} Offender: [{pid,<0.29371.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29390.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:26] [ns_1@127.0.0.1:<0.29355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:27] [ns_1@127.0.0.1:<0.29325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:28] [ns_1@127.0.0.1:<0.29387.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:28] [ns_1@127.0.0.1:<0.29353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:29] [ns_1@127.0.0.1:<0.29366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:29] [ns_1@127.0.0.1:<0.29327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29276.1>} [error_logger:error] [2012-03-26 2:47:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29390.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29411.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:30] [ns_1@127.0.0.1:<0.29400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:30] [ns_1@127.0.0.1:<0.29364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:31] [ns_1@127.0.0.1:<0.29381.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:31] [ns_1@127.0.0.1:<0.29329.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:32] [ns_1@127.0.0.1:<0.29414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:32] [ns_1@127.0.0.1:<0.29276.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:32] [ns_1@127.0.0.1:<0.29379.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[error_logger:error] [2012-03-26 2:47:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29411.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29427.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:33] [ns_1@127.0.0.1:<0.29393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:33] [ns_1@127.0.0.1:<0.29333.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:34] [ns_1@127.0.0.1:<0.29424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:34] [ns_1@127.0.0.1:<0.29391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:35] [ns_1@127.0.0.1:<0.29404.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29427.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29446.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:35] [ns_1@127.0.0.1:<0.29347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:36] [ns_1@127.0.0.1:<0.29439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:36] [ns_1@127.0.0.1:<0.29402.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:37] [ns_1@127.0.0.1:<0.29420.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:37] [ns_1@127.0.0.1:<0.29360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:38] [ns_1@127.0.0.1:<0.29451.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29446.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29462.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:38] [ns_1@127.0.0.1:<0.29416.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:39] [ns_1@127.0.0.1:<0.29433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29276.1>} [stats:error] [2012-03-26 2:47:39] [ns_1@127.0.0.1:<0.29372.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:40] [ns_1@127.0.0.1:<0.29463.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:40] [ns_1@127.0.0.1:<0.29428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.28952.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29478.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:41] [ns_1@127.0.0.1:<0.29335.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29462.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29484.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[stats:error] [2012-03-26 2:47:41] [ns_1@127.0.0.1:<0.29385.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:42] [ns_1@127.0.0.1:<0.29476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:42] [ns_1@127.0.0.1:<0.29441.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:43] [ns_1@127.0.0.1:<0.29447.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:43] [ns_1@127.0.0.1:<0.29398.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:44] [ns_1@127.0.0.1:<0.29489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29484.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29502.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:45] [ns_1@127.0.0.1:<0.29453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:45] [ns_1@127.0.0.1:<0.29457.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:46] [ns_1@127.0.0.1:<0.29412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:46] [ns_1@127.0.0.1:<0.29503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:47] [ns_1@127.0.0.1:<0.29465.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:47] [ns_1@127.0.0.1:<0.29472.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29502.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29521.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:47:48] [ns_1@127.0.0.1:<0.29422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:48] [ns_1@127.0.0.1:<0.29514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:49] [ns_1@127.0.0.1:<0.29479.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:49] [ns_1@127.0.0.1:<0.29485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29276.1>} [stats:error] [2012-03-26 2:47:50] [ns_1@127.0.0.1:<0.29435.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:50] [ns_1@127.0.0.1:<0.29526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29521.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29539.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:51] [ns_1@127.0.0.1:<0.29491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:51] [ns_1@127.0.0.1:<0.29495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:52] [ns_1@127.0.0.1:<0.29449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:52] [ns_1@127.0.0.1:<0.29540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:53] [ns_1@127.0.0.1:<0.29505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:53] [ns_1@127.0.0.1:<0.29509.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29539.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.29557.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:54] [ns_1@127.0.0.1:<0.29459.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:54] [ns_1@127.0.0.1:<0.29550.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:55] [ns_1@127.0.0.1:<0.29516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:55] [ns_1@127.0.0.1:<0.29522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:56] [ns_1@127.0.0.1:<0.29474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:56] [ns_1@127.0.0.1:<0.29565.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:47:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29557.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29576.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:47:57] [ns_1@127.0.0.1:<0.29528.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:57] [ns_1@127.0.0.1:<0.29534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:58] [ns_1@127.0.0.1:<0.29487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:58] [ns_1@127.0.0.1:<0.29577.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:59] [ns_1@127.0.0.1:<0.29542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:47:59] [ns_1@127.0.0.1:<0.29546.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:47:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29276.1>} [ns_doctor:info] [2012-03-26 2:47:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755279,657344}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38694712}, {processes,10760160}, {processes_used,9158504}, {system,27934552}, {atom,1306681}, {atom_used,1284164}, {binary,748720}, {code,12859877}, 
{ets,2443856}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6301}, {memory_data,{4040077312,4011798528,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27616 kB\nBuffers: 63988 kB\nCached: 3524724 kB\nSwapCached: 0 kB\nActive: 312156 kB\nInactive: 3437352 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27616 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 12 kB\nWriteback: 0 kB\nAnonPages: 160832 kB\nMapped: 24872 kB\nSlab: 134328 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580460 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3609317376}, {buffered_memory,65523712}, {free_memory,28278784}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6296393,2}}, {context_switches,{1771059,0}}, {garbage_collection,{941301,1333288297,0}}, {io,{{input,28013812},{output,69136517}}}, {reductions,{372954166,621435}}, {run_queue,0}, {runtime,{80370,170}}]}]}] [error_logger:error] [2012-03-26 2:47:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29576.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:47:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29610.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:00] [ns_1@127.0.0.1:<0.29497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:00] [ns_1@127.0.0.1:<0.29587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:01] [ns_1@127.0.0.1:<0.29552.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:01] [ns_1@127.0.0.1:<0.29558.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:02] [ns_1@127.0.0.1:<0.29512.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:48:02] [ns_1@127.0.0.1:<0.29615.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29610.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29626.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:03] [ns_1@127.0.0.1:<0.29567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:03] [ns_1@127.0.0.1:<0.29571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:04] [ns_1@127.0.0.1:<0.29524.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:04] [ns_1@127.0.0.1:<0.29629.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:05] [ns_1@127.0.0.1:<0.29579.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:05] [ns_1@127.0.0.1:<0.29583.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29626.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29648.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:06] [ns_1@127.0.0.1:<0.29536.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:06] [ns_1@127.0.0.1:<0.29641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:07] [ns_1@127.0.0.1:<0.29604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:07] [ns_1@127.0.0.1:<0.29611.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:08] [ns_1@127.0.0.1:<0.29548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 2:48:08] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:48:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29276.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [error_logger:error] [2012-03-26 2:48:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29648.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29662.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:08] [ns_1@127.0.0.1:<0.29653.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:09] [ns_1@127.0.0.1:<0.29617.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:09] [ns_1@127.0.0.1:<0.29621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:10] [ns_1@127.0.0.1:<0.29560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:10] [ns_1@127.0.0.1:<0.29665.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:11] [ns_1@127.0.0.1:<0.29633.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29662.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,menelaus_sup} started: [{pid,<0.29688.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:11] [ns_1@127.0.0.1:<0.29635.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:12] [ns_1@127.0.0.1:<0.29573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:12] [ns_1@127.0.0.1:<0.29683.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:13] [ns_1@127.0.0.1:<0.29645.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:13] [ns_1@127.0.0.1:<0.29649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:14] [ns_1@127.0.0.1:<0.29585.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:14] [ns_1@127.0.0.1:<0.29674.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29688.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29706.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:15] [ns_1@127.0.0.1:<0.29695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:15] [ns_1@127.0.0.1:<0.29657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:16] [ns_1@127.0.0.1:<0.29659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29478.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29716.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:16] [ns_1@127.0.0.1:<0.29613.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:48:17] [ns_1@127.0.0.1:<0.29709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:17] [ns_1@127.0.0.1:<0.29677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29706.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29726.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:18] [ns_1@127.0.0.1:<0.29679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:18] [ns_1@127.0.0.1:<0.29691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:18] [ns_1@127.0.0.1:<0.29623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:18] [ns_1@127.0.0.1:<0.29639.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:18] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 2:48:18] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:48:18] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:48:18] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:48:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:48:18] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [user:info] [2012-03-26 2:48:20] [ns_1@127.0.0.1:<0.29752.1>:menelaus_web_alerts_srv:global_alert:64] Approaching full disk warning. Usage of disk "/" on node "127.0.0.1" is around 100%. [ns_server:warn] [2012-03-26 2:48:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
[ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:<0.29651.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:<0.29714.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 2:48:23] [ns_1@127.0.0.1:<0.29727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:48:25: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 2:48:23] [ns_1@127.0.0.1:<0.29729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] 
[2012-03-26 2:48:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29726.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29781.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:24] [ns_1@127.0.0.1:<0.29663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:24] [ns_1@127.0.0.1:<0.29681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:25] [ns_1@127.0.0.1:<0.29776.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:25] [ns_1@127.0.0.1:<0.29731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:26] [ns_1@127.0.0.1:<0.29787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:26] [ns_1@127.0.0.1:<0.29693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29781.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29801.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:27] [ns_1@127.0.0.1:<0.29791.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:27] [ns_1@127.0.0.1:<0.29734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:28] [ns_1@127.0.0.1:<0.29766.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:28] [ns_1@127.0.0.1:<0.29798.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:28] [ns_1@127.0.0.1:<0.29707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29766.1>} 
[stats:error] [2012-03-26 2:48:29] [ns_1@127.0.0.1:<0.29804.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29801.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29820.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:32] [ns_1@127.0.0.1:<0.29810.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:32] [ns_1@127.0.0.1:<0.29719.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29820.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29828.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:33] [ns_1@127.0.0.1:<0.29737.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:33] [ns_1@127.0.0.1:<0.29739.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:34] [ns_1@127.0.0.1:<0.29766.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:34] [ns_1@127.0.0.1:<0.29825.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:34] [ns_1@127.0.0.1:<0.29759.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:35] [ns_1@127.0.0.1:<0.29831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:35] [ns_1@127.0.0.1:<0.29741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, 
[{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29828.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29849.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:36] [ns_1@127.0.0.1:<0.29839.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:37] [ns_1@127.0.0.1:<0.29760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:37] [ns_1@127.0.0.1:<0.29846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:38] [ns_1@127.0.0.1:<0.29782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:38] [ns_1@127.0.0.1:<0.29852.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29849.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29863.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:39] [ns_1@127.0.0.1:<0.29761.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:39] [ns_1@127.0.0.1:<0.29858.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29766.1>} [stats:error] [2012-03-26 2:48:40] [ns_1@127.0.0.1:<0.29796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:40] [ns_1@127.0.0.1:<0.29766.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:40] [ns_1@127.0.0.1:<0.29864.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:41] [ns_1@127.0.0.1:<0.29762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:41] [ns_1@127.0.0.1:<0.29873.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:41] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29863.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29885.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:42] [ns_1@127.0.0.1:<0.29808.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:42] [ns_1@127.0.0.1:<0.29877.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:43] [ns_1@127.0.0.1:<0.29768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:43] [ns_1@127.0.0.1:<0.29886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:44] [ns_1@127.0.0.1:<0.29823.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:44] [ns_1@127.0.0.1:<0.29890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29885.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29903.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:45] [ns_1@127.0.0.1:<0.29789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:45] [ns_1@127.0.0.1:<0.29896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:46] [ns_1@127.0.0.1:<0.29766.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:46] [ns_1@127.0.0.1:<0.29835.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:46] [ns_1@127.0.0.1:<0.29904.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:47] [ns_1@127.0.0.1:<0.29802.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:47] 
[ns_1@127.0.0.1:<0.29910.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29903.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29922.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:48] [ns_1@127.0.0.1:<0.29850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:48] [ns_1@127.0.0.1:<0.29915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:49] [ns_1@127.0.0.1:<0.29813.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:49] [ns_1@127.0.0.1:<0.29923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29766.1>} [stats:error] [2012-03-26 2:48:50] [ns_1@127.0.0.1:<0.29860.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:50] [ns_1@127.0.0.1:<0.29927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29922.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29940.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:51] [ns_1@127.0.0.1:<0.29854.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:51] [ns_1@127.0.0.1:<0.29935.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:52] [ns_1@127.0.0.1:<0.29875.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:52] [ns_1@127.0.0.1:<0.29829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:48:53] [ns_1@127.0.0.1:<0.29866.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29940.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29956.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:53] [ns_1@127.0.0.1:<0.29947.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:54] [ns_1@127.0.0.1:<0.29888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:54] [ns_1@127.0.0.1:<0.29941.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:55] [ns_1@127.0.0.1:<0.29880.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:55] [ns_1@127.0.0.1:<0.29959.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29716.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29972.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:56] [ns_1@127.0.0.1:<0.29898.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:48:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29956.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29976.1>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:56] [ns_1@127.0.0.1:<0.29842.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:57] [ns_1@127.0.0.1:<0.29892.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:57] [ns_1@127.0.0.1:<0.29973.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:58] [ns_1@127.0.0.1:<0.29912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:58] [ns_1@127.0.0.1:<0.29951.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:48:59] [ns_1@127.0.0.1:<0.29906.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:48:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29766.1>} [ns_doctor:info] [2012-03-26 2:48:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755339,688945}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38305568}, {processes,10578576}, {processes_used,8968216}, {system,27726992}, {atom,1306681}, {atom_used,1284164}, {binary,579792}, {code,12859877}, {ets,2416096}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6361}, {memory_data,{4040077312,4011798528,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27616 kB\nBuffers: 64096 kB\nCached: 3524864 kB\nSwapCached: 0 kB\nActive: 312284 kB\nInactive: 3437512 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27616 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 12 kB\nWriteback: 0 kB\nAnonPages: 160844 kB\nMapped: 24872 kB\nSlab: 134320 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578328 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3609460736}, {buffered_memory,65634304}, {free_memory,28278784}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6356424,0}}, {context_switches,{1783555,0}}, {garbage_collection,{947903,1344172911,0}}, 
{io,{{input,28285920},{output,69792979}}}, {reductions,{375399321,580122}}, {run_queue,0}, {runtime,{81190,200}}]}]}] [error_logger:error] [2012-03-26 2:48:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29976.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:48:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.29997.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:48:59] [ns_1@127.0.0.1:<0.29985.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:00] [ns_1@127.0.0.1:<0.29925.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:01] [ns_1@127.0.0.1:<0.29966.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:01] [ns_1@127.0.0.1:<0.29917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:02] [ns_1@127.0.0.1:<0.30000.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:02] [ns_1@127.0.0.1:<0.29937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.29997.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30014.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:03] [ns_1@127.0.0.1:<0.29979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:03] [ns_1@127.0.0.1:<0.29931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:04] [ns_1@127.0.0.1:<0.30011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:04] [ns_1@127.0.0.1:<0.29949.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:05] [ns_1@127.0.0.1:<0.29989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:05] 
[ns_1@127.0.0.1:<0.29945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30014.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30034.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:06] [ns_1@127.0.0.1:<0.30023.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:06] [ns_1@127.0.0.1:<0.29964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:07] [ns_1@127.0.0.1:<0.30005.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:07] [ns_1@127.0.0.1:<0.29957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:08] [ns_1@127.0.0.1:<0.30037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:08] [ns_1@127.0.0.1:<0.29977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30034.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30050.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:09] [ns_1@127.0.0.1:<0.30017.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:09] [ns_1@127.0.0.1:<0.29970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29766.1>} [stats:error] [2012-03-26 2:49:10] [ns_1@127.0.0.1:<0.30047.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:10] [ns_1@127.0.0.1:<0.29987.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:49:11] [ns_1@127.0.0.1:<0.30029.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:11] [ns_1@127.0.0.1:<0.29983.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30050.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30073.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:12] [ns_1@127.0.0.1:<0.30064.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:12] [ns_1@127.0.0.1:<0.30002.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:13] [ns_1@127.0.0.1:<0.30041.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:13] [ns_1@127.0.0.1:<0.29998.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:14] [ns_1@127.0.0.1:<0.30076.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:14] [ns_1@127.0.0.1:<0.30015.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30073.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30091.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:15] [ns_1@127.0.0.1:<0.30053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:15] [ns_1@127.0.0.1:<0.30009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:16] [ns_1@127.0.0.1:<0.30088.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:16] [ns_1@127.0.0.1:<0.30027.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:49:17] [ns_1@127.0.0.1:<0.30068.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:17] [ns_1@127.0.0.1:<0.30021.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30091.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30109.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:18] [ns_1@127.0.0.1:<0.30100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:18] [ns_1@127.0.0.1:<0.30039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:19] [ns_1@127.0.0.1:<0.30080.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.29766.1>} [stats:error] [2012-03-26 2:49:19] [ns_1@127.0.0.1:<0.30035.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:20] [ns_1@127.0.0.1:<0.30112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:20] [ns_1@127.0.0.1:<0.30051.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30109.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30127.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:21] [ns_1@127.0.0.1:<0.30094.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:21] [ns_1@127.0.0.1:<0.30045.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:22] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason 
{timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:49:22] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.29766.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:49:22] [ns_1@127.0.0.1:<0.30124.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:22] [ns_1@127.0.0.1:<0.30066.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:23] [ns_1@127.0.0.1:<0.30104.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30127.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30143.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:49:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30143.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30150.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:49:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30150.1>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30161.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:49:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.29972.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30162.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:49:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30161.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30166.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:33] [ns_1@127.0.0.1:<0.30120.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:33] [ns_1@127.0.0.1:<0.30062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:34] [ns_1@127.0.0.1:<0.30136.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:34] [ns_1@127.0.0.1:<0.30157.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:34] [ns_1@127.0.0.1:<0.30078.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:35] [ns_1@127.0.0.1:<0.30132.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30166.1>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30185.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:35] [ns_1@127.0.0.1:<0.30074.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:36] [ns_1@127.0.0.1:<0.30177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:36] [ns_1@127.0.0.1:<0.30092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:37] [ns_1@127.0.0.1:<0.30171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:37] [ns_1@127.0.0.1:<0.30084.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:38] [ns_1@127.0.0.1:<0.30191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30185.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30202.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:38] [ns_1@127.0.0.1:<0.30102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:39] [ns_1@127.0.0.1:<0.30186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30157.1>} [stats:error] [2012-03-26 2:49:39] [ns_1@127.0.0.1:<0.30098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:40] [ns_1@127.0.0.1:<0.30203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:40] [ns_1@127.0.0.1:<0.30157.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:40] [ns_1@127.0.0.1:<0.30114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:41] [ns_1@127.0.0.1:<0.30197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:41] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30202.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30224.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:42] [ns_1@127.0.0.1:<0.30110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:42] [ns_1@127.0.0.1:<0.30216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:43] [ns_1@127.0.0.1:<0.30128.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:43] [ns_1@127.0.0.1:<0.30212.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:44] [ns_1@127.0.0.1:<0.30122.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:44] [ns_1@127.0.0.1:<0.30229.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30224.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30242.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:45] [ns_1@127.0.0.1:<0.30138.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:45] [ns_1@127.0.0.1:<0.30225.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:46] [ns_1@127.0.0.1:<0.30134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:46] [ns_1@127.0.0.1:<0.30243.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:46] [ns_1@127.0.0.1:<0.30157.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:47] [ns_1@127.0.0.1:<0.30167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:47] 
[ns_1@127.0.0.1:<0.30235.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30242.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30261.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:48] [ns_1@127.0.0.1:<0.30173.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:48] [ns_1@127.0.0.1:<0.30253.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:49] [ns_1@127.0.0.1:<0.30179.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:49] [ns_1@127.0.0.1:<0.30249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30157.1>} [stats:error] [2012-03-26 2:49:50] [ns_1@127.0.0.1:<0.30189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:50] [ns_1@127.0.0.1:<0.30266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30261.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30279.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:51] [ns_1@127.0.0.1:<0.30193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:51] [ns_1@127.0.0.1:<0.30262.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:52] [ns_1@127.0.0.1:<0.30199.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:52] [ns_1@127.0.0.1:<0.30280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] 
[2012-03-26 2:49:52] [ns_1@127.0.0.1:<0.30157.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:53] [ns_1@127.0.0.1:<0.30205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30279.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30296.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:54] [ns_1@127.0.0.1:<0.30214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:54] [ns_1@127.0.0.1:<0.30290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:55] [ns_1@127.0.0.1:<0.30218.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:55] [ns_1@127.0.0.1:<0.30274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:56] [ns_1@127.0.0.1:<0.30227.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:56] [ns_1@127.0.0.1:<0.30304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30296.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30315.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:49:57] [ns_1@127.0.0.1:<0.30231.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:57] [ns_1@127.0.0.1:<0.30286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:58] [ns_1@127.0.0.1:<0.30237.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:58] [ns_1@127.0.0.1:<0.30316.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:49:59] 
[ns_1@127.0.0.1:<0.30245.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:49:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30157.1>} [ns_doctor:info] [2012-03-26 2:49:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755399,719378}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38345800}, {processes,10583464}, {processes_used,8974120}, {system,27762336}, {atom,1306681}, {atom_used,1284164}, {binary,579184}, {code,12859877}, {ets,2445040}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6421}, {memory_data,{4040077312,4011671552,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27740 kB\nBuffers: 64172 kB\nCached: 3525024 kB\nSwapCached: 0 kB\nActive: 312352 kB\nInactive: 3437652 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27740 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 28 kB\nWriteback: 0 kB\nAnonPages: 160844 kB\nMapped: 24872 kB\nSlab: 134308 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578328 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3609624576}, {buffered_memory,65712128}, {free_memory,28405760}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6416456,1}}, {context_switches,{1794681,0}}, {garbage_collection,{953783,1353276194,0}}, {io,{{input,28295872},{output,70125728}}}, {reductions,{377552946,566146}}, {run_queue,0}, {runtime,{81850,180}}]}]}] [stats:error] [2012-03-26 2:49:59] [ns_1@127.0.0.1:<0.30297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:49:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30315.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:49:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30336.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:00] [ns_1@127.0.0.1:<0.30251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:00] [ns_1@127.0.0.1:<0.30326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:01] [ns_1@127.0.0.1:<0.30255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:01] [ns_1@127.0.0.1:<0.30310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:02] [ns_1@127.0.0.1:<0.30264.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30336.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30350.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:03] [ns_1@127.0.0.1:<0.30341.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:03] [ns_1@127.0.0.1:<0.30268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:04] [ns_1@127.0.0.1:<0.30322.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30350.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30364.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:50:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: 
[{pid,<0.30162.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30365.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:06] [ns_1@127.0.0.1:<0.30302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:07] [ns_1@127.0.0.1:<0.30353.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:07] [ns_1@127.0.0.1:<0.30308.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:08] [ns_1@127.0.0.1:<0.30347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:08] [ns_1@127.0.0.1:<0.30312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30364.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30379.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:09] [ns_1@127.0.0.1:<0.30282.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30157.1>} [stats:error] [2012-03-26 2:50:09] [ns_1@127.0.0.1:<0.30320.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:10] [ns_1@127.0.0.1:<0.30337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:10] [ns_1@127.0.0.1:<0.30324.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:11] [ns_1@127.0.0.1:<0.30370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:11] [ns_1@127.0.0.1:<0.30333.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: 
[{pid,<0.30379.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30403.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:12] [ns_1@127.0.0.1:<0.30276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:12] [ns_1@127.0.0.1:<0.30339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:13] [ns_1@127.0.0.1:<0.30389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:13] [ns_1@127.0.0.1:<0.30345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:14] [ns_1@127.0.0.1:<0.30288.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:14] [ns_1@127.0.0.1:<0.30351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30403.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30421.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:15] [ns_1@127.0.0.1:<0.30400.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:15] [ns_1@127.0.0.1:<0.30357.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:16] [ns_1@127.0.0.1:<0.30366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:16] [ns_1@127.0.0.1:<0.30368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:17] [ns_1@127.0.0.1:<0.30412.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30421.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 2:50:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30437.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:17] [ns_1@127.0.0.1:<0.30374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:18] [ns_1@127.0.0.1:<0.30376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:18] [ns_1@127.0.0.1:<0.30380.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:19] [ns_1@127.0.0.1:<0.30426.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30157.1>} [stats:error] [2012-03-26 2:50:19] [ns_1@127.0.0.1:<0.30428.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:20] [ns_1@127.0.0.1:<0.30393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30437.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30455.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:20] [ns_1@127.0.0.1:<0.30396.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:21] [ns_1@127.0.0.1:<0.30438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:21] [ns_1@127.0.0.1:<0.30391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:22] [ns_1@127.0.0.1:<0.30406.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:22] [ns_1@127.0.0.1:<0.30408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:23] [ns_1@127.0.0.1:<0.30450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, 
{latest,minute,1}]}} Offender: [{pid,<0.30455.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30473.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:24] [ns_1@127.0.0.1:<0.30414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:24] [ns_1@127.0.0.1:<0.30418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:25] [ns_1@127.0.0.1:<0.30422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:25] [ns_1@127.0.0.1:<0.30462.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:26] [ns_1@127.0.0.1:<0.30404.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:26] [ns_1@127.0.0.1:<0.30430.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30473.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30492.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:27] [ns_1@127.0.0.1:<0.30432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:27] [ns_1@127.0.0.1:<0.30474.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:28] [ns_1@127.0.0.1:<0.30440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:28] [ns_1@127.0.0.1:<0.30442.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:28] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:50:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30157.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, 
list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:50:29] [ns_1@127.0.0.1:<0.30444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:29] [ns_1@127.0.0.1:<0.30487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30492.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30515.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:30] [ns_1@127.0.0.1:<0.30452.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:30] [ns_1@127.0.0.1:<0.30456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:31] [ns_1@127.0.0.1:<0.30458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:31] [ns_1@127.0.0.1:<0.30499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:32] [ns_1@127.0.0.1:<0.30464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:32] [ns_1@127.0.0.1:<0.30466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30515.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30531.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:33] [ns_1@127.0.0.1:<0.30468.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:33] [ns_1@127.0.0.1:<0.30516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:33] [ns_1@127.0.0.1:<0.30526.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:33] [ns_1@127.0.0.1:<0.30476.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:34] [ns_1@127.0.0.1:<0.30489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:34] [ns_1@127.0.0.1:<0.30481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:34] [ns_1@127.0.0.1:<0.30511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:35] [ns_1@127.0.0.1:<0.30483.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:35] [ns_1@127.0.0.1:<0.30542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30531.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30556.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:36] [ns_1@127.0.0.1:<0.30501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:36] [ns_1@127.0.0.1:<0.30493.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:37] [ns_1@127.0.0.1:<0.30495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:37] [ns_1@127.0.0.1:<0.30557.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:38] [ns_1@127.0.0.1:<0.30518.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:38] [ns_1@127.0.0.1:<0.30503.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30556.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:38] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30572.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:39] [ns_1@127.0.0.1:<0.30505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30511.1>} [stats:error] [2012-03-26 2:50:39] [ns_1@127.0.0.1:<0.30567.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:40] [ns_1@127.0.0.1:<0.30528.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:40] [ns_1@127.0.0.1:<0.30511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:40] [ns_1@127.0.0.1:<0.30520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30365.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30586.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:41] [ns_1@127.0.0.1:<0.30522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:41] [ns_1@127.0.0.1:<0.30582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30572.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30595.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:42] [ns_1@127.0.0.1:<0.30544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:42] [ns_1@127.0.0.1:<0.30532.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:43] [ns_1@127.0.0.1:<0.30534.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:43] [ns_1@127.0.0.1:<0.30596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:44] [ns_1@127.0.0.1:<0.30559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30595.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30611.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:44] [ns_1@127.0.0.1:<0.30548.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:45] [ns_1@127.0.0.1:<0.30536.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:45] [ns_1@127.0.0.1:<0.30606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:46] [ns_1@127.0.0.1:<0.30569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:46] [ns_1@127.0.0.1:<0.30511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:46] [ns_1@127.0.0.1:<0.30561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:47] [ns_1@127.0.0.1:<0.30538.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30611.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30631.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:47] [ns_1@127.0.0.1:<0.30621.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:48] [ns_1@127.0.0.1:<0.30584.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:50:48] [ns_1@127.0.0.1:<0.30573.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:49] [ns_1@127.0.0.1:<0.30550.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30511.1>} [stats:error] [2012-03-26 2:50:50] [ns_1@127.0.0.1:<0.30634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:50] [ns_1@127.0.0.1:<0.30598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30631.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30649.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:51] [ns_1@127.0.0.1:<0.30587.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:51] [ns_1@127.0.0.1:<0.30563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:52] [ns_1@127.0.0.1:<0.30646.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:52] [ns_1@127.0.0.1:<0.30612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:52] [ns_1@127.0.0.1:<0.30511.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:53] [ns_1@127.0.0.1:<0.30600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:53] [ns_1@127.0.0.1:<0.30580.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30649.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30668.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 2:50:54] [ns_1@127.0.0.1:<0.30658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:54] [ns_1@127.0.0.1:<0.30623.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:55] [ns_1@127.0.0.1:<0.30614.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:55] [ns_1@127.0.0.1:<0.30592.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:56] [ns_1@127.0.0.1:<0.30671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:56] [ns_1@127.0.0.1:<0.30636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:50:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30668.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30687.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:50:57] [ns_1@127.0.0.1:<0.30625.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:57] [ns_1@127.0.0.1:<0.30604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:58] [ns_1@127.0.0.1:<0.30684.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:58] [ns_1@127.0.0.1:<0.30650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:59] [ns_1@127.0.0.1:<0.30638.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:50:59] [ns_1@127.0.0.1:<0.30618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:50:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30511.1>} [ns_doctor:info] [2012-03-26 2:50:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755459,749093}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38453528}, {processes,10663136}, {processes_used,9053792}, {system,27790392}, {atom,1306681}, {atom_used,1284164}, {binary,569704}, {code,12859877}, {ets,2476536}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, 
{cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6481}, {memory_data,{4040077312,4012052480,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27368 kB\nBuffers: 64232 kB\nCached: 3525188 kB\nSwapCached: 0 kB\nActive: 312456 kB\nInactive: 3437812 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27368 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 44 kB\nWriteback: 0 kB\nAnonPages: 160964 kB\nMapped: 24872 kB\nSlab: 134304 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578328 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3609792512}, {buffered_memory,65773568}, {free_memory,28024832}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6476484,0}}, {context_switches,{1806630,0}}, {garbage_collection,{960179,1363648579,0}}, {io,{{input,28305833},{output,70493491}}}, {reductions,{379919447,600354}}, {run_queue,0}, {runtime,{82550,180}}]}]}] [error_logger:error] [2012-03-26 2:50:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30687.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:50:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30721.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:00] [ns_1@127.0.0.1:<0.30696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:00] [ns_1@127.0.0.1:<0.30660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:01] [ns_1@127.0.0.1:<0.30652.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:01] [ns_1@127.0.0.1:<0.30632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:02] [ns_1@127.0.0.1:<0.30724.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:02] [ns_1@127.0.0.1:<0.30676.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30721.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30737.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:03] [ns_1@127.0.0.1:<0.30662.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:03] [ns_1@127.0.0.1:<0.30644.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:04] [ns_1@127.0.0.1:<0.30734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:04] [ns_1@127.0.0.1:<0.30688.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:05] [ns_1@127.0.0.1:<0.30678.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:05] [ns_1@127.0.0.1:<0.30656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30737.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30757.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:06] [ns_1@127.0.0.1:<0.30746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:06] [ns_1@127.0.0.1:<0.30698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:07] [ns_1@127.0.0.1:<0.30690.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:07] [ns_1@127.0.0.1:<0.30669.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:08] [ns_1@127.0.0.1:<0.30760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:08] [ns_1@127.0.0.1:<0.30726.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
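Note: the started:/Offender: proplists in these reports mirror the child specification menelaus_sup holds for the worker, which is why every child_terminated report is paired with a PROGRESS report carrying a fresh pid. A minimal sketch of how the logged fields (name, mfargs, restart_type, shutdown, child_type) fit into a supervisor child spec (illustrative supervisor module with assumed restart intensity, not menelaus_sup's actual init/1):

%% Starting this supervisor needs the real menelaus_web_alerts_srv module
%% from ns_server; the spec is shown only to map the fields logged above.
-module(alerts_sup_sketch).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    MaxRestarts = 10,   %% assumed restart intensity, not taken from the log
    MaxSeconds = 10,
    Child = {menelaus_web_alerts_srv,                      %% name
             {menelaus_web_alerts_srv, start_link, []},    %% mfargs
             permanent,                                    %% restart_type
             5000,                                         %% shutdown (ms)
             worker,                                       %% child_type
             [menelaus_web_alerts_srv]},
    {ok, {{one_for_one, MaxRestarts, MaxSeconds}, [Child]}}.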
[error_logger:error] [2012-03-26 2:51:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30757.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30773.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:09] [ns_1@127.0.0.1:<0.30707.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30511.1>} [stats:error] [2012-03-26 2:51:09] [ns_1@127.0.0.1:<0.30682.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:10] [ns_1@127.0.0.1:<0.30770.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:10] [ns_1@127.0.0.1:<0.30738.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:11] [ns_1@127.0.0.1:<0.30728.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:11] [ns_1@127.0.0.1:<0.30694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30773.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30796.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:12] [ns_1@127.0.0.1:<0.30787.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:12] [ns_1@127.0.0.1:<0.30750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:13] [ns_1@127.0.0.1:<0.30740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:13] [ns_1@127.0.0.1:<0.30722.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:14] [ns_1@127.0.0.1:<0.30799.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:14] [ns_1@127.0.0.1:<0.30762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30796.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30814.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:15] [ns_1@127.0.0.1:<0.30752.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:15] [ns_1@127.0.0.1:<0.30732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30586.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30821.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:16] [ns_1@127.0.0.1:<0.30811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:16] [ns_1@127.0.0.1:<0.30774.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:17] [ns_1@127.0.0.1:<0.30766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30814.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30831.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 
2:51:17] [ns_1@127.0.0.1:<0.30744.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:18] [ns_1@127.0.0.1:<0.30824.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:18] [ns_1@127.0.0.1:<0.30801.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:19] [ns_1@127.0.0.1:<0.30783.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30511.1>} [stats:error] [2012-03-26 2:51:19] [ns_1@127.0.0.1:<0.30758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:20] [ns_1@127.0.0.1:<0.30836.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30831.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30849.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:21] [ns_1@127.0.0.1:<0.30815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:21] [ns_1@127.0.0.1:<0.30793.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:22] [ns_1@127.0.0.1:<0.30768.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:22] [ns_1@127.0.0.1:<0.30850.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:23] [ns_1@127.0.0.1:<0.30789.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:23] [ns_1@127.0.0.1:<0.30805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30849.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30868.1>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:24] [ns_1@127.0.0.1:<0.30785.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:24] [ns_1@127.0.0.1:<0.30861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:25] [ns_1@127.0.0.1:<0.30826.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:25] [ns_1@127.0.0.1:<0.30819.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:26] [ns_1@127.0.0.1:<0.30797.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:26] [ns_1@127.0.0.1:<0.30876.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30868.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30887.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:27] [ns_1@127.0.0.1:<0.30838.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:27] [ns_1@127.0.0.1:<0.30832.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:28] [ns_1@127.0.0.1:<0.30807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:28] [ns_1@127.0.0.1:<0.30888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:28] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:51:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30511.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 
stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:51:29] [ns_1@127.0.0.1:<0.30853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:29] [ns_1@127.0.0.1:<0.30844.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30887.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30910.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:30] [ns_1@127.0.0.1:<0.30822.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:30] [ns_1@127.0.0.1:<0.30898.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:31] [ns_1@127.0.0.1:<0.30863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:31] [ns_1@127.0.0.1:<0.30857.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:32] [ns_1@127.0.0.1:<0.30834.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:32] [ns_1@127.0.0.1:<0.30915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30910.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30926.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:33] [ns_1@127.0.0.1:<0.30878.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:33] [ns_1@127.0.0.1:<0.30869.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:33] [ns_1@127.0.0.1:<0.30882.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:33] [ns_1@127.0.0.1:<0.30894.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:34] [ns_1@127.0.0.1:<0.30846.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:34] [ns_1@127.0.0.1:<0.30927.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:34] [ns_1@127.0.0.1:<0.30906.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:35] [ns_1@127.0.0.1:<0.30890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:35] [ns_1@127.0.0.1:<0.30911.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30926.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30951.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:36] [ns_1@127.0.0.1:<0.30859.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:36] [ns_1@127.0.0.1:<0.30943.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:37] [ns_1@127.0.0.1:<0.30900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:37] [ns_1@127.0.0.1:<0.30921.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:38] [ns_1@127.0.0.1:<0.30871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:38] [ns_1@127.0.0.1:<0.30956.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30951.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30967.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:39] [ns_1@127.0.0.1:<0.30917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30906.1>} [stats:error] [2012-03-26 2:51:39] [ns_1@127.0.0.1:<0.30937.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:40] [ns_1@127.0.0.1:<0.30884.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:40] [ns_1@127.0.0.1:<0.30906.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:40] [ns_1@127.0.0.1:<0.30968.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:41] [ns_1@127.0.0.1:<0.30929.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30967.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.30987.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:41] [ns_1@127.0.0.1:<0.30952.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:42] [ns_1@127.0.0.1:<0.30896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:42] [ns_1@127.0.0.1:<0.30981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:43] [ns_1@127.0.0.1:<0.30931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:44] [ns_1@127.0.0.1:<0.30962.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:44] [ns_1@127.0.0.1:<0.30913.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.30987.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31003.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:45] [ns_1@127.0.0.1:<0.30994.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:45] [ns_1@127.0.0.1:<0.30933.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:46] [ns_1@127.0.0.1:<0.30977.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:46] [ns_1@127.0.0.1:<0.30923.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:46] [ns_1@127.0.0.1:<0.30906.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:47] [ns_1@127.0.0.1:<0.31008.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:47] [ns_1@127.0.0.1:<0.30960.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31003.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31024.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:51:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30906.1>} [stats:error] [2012-03-26 2:51:48] [ns_1@127.0.0.1:<0.30990.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31024.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31033.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:51:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.30821.1>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31034.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:51] [ns_1@127.0.0.1:<0.30945.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:52] [ns_1@127.0.0.1:<0.31000.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:52] [ns_1@127.0.0.1:<0.30906.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:52] [ns_1@127.0.0.1:<0.30979.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:53] [ns_1@127.0.0.1:<0.31018.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:53] [ns_1@127.0.0.1:<0.30970.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31033.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31051.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:54] [ns_1@127.0.0.1:<0.31014.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:54] [ns_1@127.0.0.1:<0.30992.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:55] [ns_1@127.0.0.1:<0.31037.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:55] [ns_1@127.0.0.1:<0.30988.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:56] [ns_1@127.0.0.1:<0.30939.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:51:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31051.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:56] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31068.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:51:56] [ns_1@127.0.0.1:<0.31006.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:57] [ns_1@127.0.0.1:<0.31048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:57] [ns_1@127.0.0.1:<0.30998.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:58] [ns_1@127.0.0.1:<0.30954.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:58] [ns_1@127.0.0.1:<0.31016.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:51:59] [ns_1@127.0.0.1:<0.31062.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:51:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30906.1>} [ns_doctor:info] [2012-03-26 2:51:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755514,946514}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38181336}, {processes,10410336}, {processes_used,8801056}, {system,27771000}, {atom,1306681}, {atom_used,1284164}, {binary,576432}, {code,12859877}, {ets,2443808}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6536}, {memory_data,{4040077312,4012052480,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27120 kB\nBuffers: 64340 kB\nCached: 3524912 kB\nSwapCached: 0 kB\nActive: 312368 kB\nInactive: 3437596 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27120 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 16 kB\nWriteback: 4 kB\nAnonPages: 160708 kB\nMapped: 24872 kB\nSlab: 134264 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 578096 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3609509888}, {buffered_memory,65884160}, {free_memory,27770880}, {total_memory,4040077312}]}, {node_storage_conf, 
[{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6531682,0}}, {context_switches,{1818402,0}}, {garbage_collection,{966586,1373516589,0}}, {io,{{input,28561116},{output,71448072}}}, {reductions,{382519133,353174}}, {run_queue,0}, {runtime,{83230,100}}]}]}] [error_logger:error] [2012-03-26 2:51:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31068.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:51:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31087.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:00] [ns_1@127.0.0.1:<0.31012.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:00] [ns_1@127.0.0.1:<0.30964.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:01] [ns_1@127.0.0.1:<0.31043.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:01] [ns_1@127.0.0.1:<0.31075.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:02] [ns_1@127.0.0.1:<0.31025.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:02] [ns_1@127.0.0.1:<0.31041.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31087.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31105.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:03] [ns_1@127.0.0.1:<0.31058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:03] [ns_1@127.0.0.1:<0.31090.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:04] [ns_1@127.0.0.1:<0.31039.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:04] [ns_1@127.0.0.1:<0.31054.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:05] [ns_1@127.0.0.1:<0.31071.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:05] [ns_1@127.0.0.1:<0.31100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31105.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31126.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:06] [ns_1@127.0.0.1:<0.31052.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:06] [ns_1@127.0.0.1:<0.31069.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:07] [ns_1@127.0.0.1:<0.31081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:07] [ns_1@127.0.0.1:<0.31112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:08] [ns_1@127.0.0.1:<0.31065.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:08] [ns_1@127.0.0.1:<0.31079.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31126.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31142.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:09] [ns_1@127.0.0.1:<0.31096.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:09] [ns_1@127.0.0.1:<0.31127.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30906.1>} [stats:error] [2012-03-26 2:52:10] [ns_1@127.0.0.1:<0.31077.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:10] [ns_1@127.0.0.1:<0.31094.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:11] [ns_1@127.0.0.1:<0.31108.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:11] [ns_1@127.0.0.1:<0.31137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31142.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31164.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:12] [ns_1@127.0.0.1:<0.31092.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:12] [ns_1@127.0.0.1:<0.31106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:13] [ns_1@127.0.0.1:<0.31121.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:13] [ns_1@127.0.0.1:<0.31153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:14] [ns_1@127.0.0.1:<0.31102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:14] [ns_1@127.0.0.1:<0.31119.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31164.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31180.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:15] [ns_1@127.0.0.1:<0.31133.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:15] [ns_1@127.0.0.1:<0.31165.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:16] [ns_1@127.0.0.1:<0.31114.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:16] [ns_1@127.0.0.1:<0.31131.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:17] [ns_1@127.0.0.1:<0.31145.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:17] [ns_1@127.0.0.1:<0.31175.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31180.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31200.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:18] [ns_1@127.0.0.1:<0.31129.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:18] [ns_1@127.0.0.1:<0.31143.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:19] [ns_1@127.0.0.1:<0.31159.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.30906.1>} [stats:error] [2012-03-26 2:52:19] [ns_1@127.0.0.1:<0.31189.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:20] [ns_1@127.0.0.1:<0.31139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:20] [ns_1@127.0.0.1:<0.31157.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31200.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31219.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:21] [ns_1@127.0.0.1:<0.31171.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:21] [ns_1@127.0.0.1:<0.31201.1>:stats_reader:log_bad_responses:191] 
Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:22] [ns_1@127.0.0.1:<0.31155.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:22] [ns_1@127.0.0.1:<0.31169.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:23] [ns_1@127.0.0.1:<0.31185.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31219.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31235.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:24] [ns_1@127.0.0.1:<0.31214.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:24] [ns_1@127.0.0.1:<0.31167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:25] [ns_1@127.0.0.1:<0.31183.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:25] [ns_1@127.0.0.1:<0.31197.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:26] [ns_1@127.0.0.1:<0.31226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31034.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31250.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:26] [ns_1@127.0.0.1:<0.31177.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31235.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31254.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:27] [ns_1@127.0.0.1:<0.31193.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:27] [ns_1@127.0.0.1:<0.31209.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:28] [ns_1@127.0.0.1:<0.31238.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:28] [ns_1@127.0.0.1:<0.31191.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:28] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:52:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.30906.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:52:29] [ns_1@127.0.0.1:<0.31205.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:29] [ns_1@127.0.0.1:<0.31224.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31254.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31275.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:30] [ns_1@127.0.0.1:<0.31251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:30] 
[ns_1@127.0.0.1:<0.31203.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:31] [ns_1@127.0.0.1:<0.31220.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:31] [ns_1@127.0.0.1:<0.31236.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:32] [ns_1@127.0.0.1:<0.31263.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:32] [ns_1@127.0.0.1:<0.31216.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31275.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31294.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:33] [ns_1@127.0.0.1:<0.31230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:33] [ns_1@127.0.0.1:<0.31248.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:34] [ns_1@127.0.0.1:<0.31280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:34] [ns_1@127.0.0.1:<0.31291.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:34] [ns_1@127.0.0.1:<0.31228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:34] [ns_1@127.0.0.1:<0.31242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:34] [ns_1@127.0.0.1:<0.31271.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:35] [ns_1@127.0.0.1:<0.31244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:35] [ns_1@127.0.0.1:<0.31261.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31294.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31320.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:36] [ns_1@127.0.0.1:<0.31307.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:36] [ns_1@127.0.0.1:<0.31255.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:37] [ns_1@127.0.0.1:<0.31257.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:37] [ns_1@127.0.0.1:<0.31278.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:38] [ns_1@127.0.0.1:<0.31323.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:38] [ns_1@127.0.0.1:<0.31265.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31320.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31336.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:39] [ns_1@127.0.0.1:<0.31267.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:39] [ns_1@127.0.0.1:<0.31289.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31271.1>} [stats:error] [2012-03-26 2:52:40] [ns_1@127.0.0.1:<0.31333.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:40] [ns_1@127.0.0.1:<0.31271.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:40] [ns_1@127.0.0.1:<0.31283.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:41] [ns_1@127.0.0.1:<0.31285.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:41] [ns_1@127.0.0.1:<0.31301.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: 
[{pid,<0.31336.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31357.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:42] [ns_1@127.0.0.1:<0.31347.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:42] [ns_1@127.0.0.1:<0.31295.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:43] [ns_1@127.0.0.1:<0.31297.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:43] [ns_1@127.0.0.1:<0.31303.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:44] [ns_1@127.0.0.1:<0.31360.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:44] [ns_1@127.0.0.1:<0.31312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31357.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31373.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:45] [ns_1@127.0.0.1:<0.31314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:45] [ns_1@127.0.0.1:<0.31305.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:46] [ns_1@127.0.0.1:<0.31370.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:46] [ns_1@127.0.0.1:<0.31271.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:46] [ns_1@127.0.0.1:<0.31325.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:47] [ns_1@127.0.0.1:<0.31327.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:47] [ns_1@127.0.0.1:<0.31321.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: 
child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31373.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31394.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:48] [ns_1@127.0.0.1:<0.31384.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:48] [ns_1@127.0.0.1:<0.31337.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:49] [ns_1@127.0.0.1:<0.31339.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31271.1>} [stats:error] [2012-03-26 2:52:49] [ns_1@127.0.0.1:<0.31331.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:50] [ns_1@127.0.0.1:<0.31397.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:50] [ns_1@127.0.0.1:<0.31349.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31394.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31413.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:51] [ns_1@127.0.0.1:<0.31354.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:51] [ns_1@127.0.0.1:<0.31345.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:52] [ns_1@127.0.0.1:<0.31410.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:52] [ns_1@127.0.0.1:<0.31271.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:52] [ns_1@127.0.0.1:<0.31362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:53] [ns_1@127.0.0.1:<0.31366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:53] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31413.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31430.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:54] [ns_1@127.0.0.1:<0.31358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:54] [ns_1@127.0.0.1:<0.31422.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:56] [ns_1@127.0.0.1:<0.31376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31430.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31442.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:52:57] [ns_1@127.0.0.1:<0.31386.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:57] [ns_1@127.0.0.1:<0.31380.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:58] [ns_1@127.0.0.1:<0.31368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:58] [ns_1@127.0.0.1:<0.31443.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:52:59] [ns_1@127.0.0.1:<0.31399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:52:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31271.1>} [ns_doctor:info] [2012-03-26 2:52:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755574,981514}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38553128}, {processes,10752296}, 
{processes_used,9145416}, {system,27800832}, {atom,1306681}, {atom_used,1284164}, {binary,569680}, {code,12859877}, {ets,2472584}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6596}, {memory_data,{4040077312,4012306432,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26688 kB\nBuffers: 64436 kB\nCached: 3525188 kB\nSwapCached: 0 kB\nActive: 312584 kB\nInactive: 3437740 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26688 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 56 kB\nWriteback: 0 kB\nAnonPages: 160712 kB\nMapped: 24872 kB\nSlab: 134308 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580076 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3609792512}, {buffered_memory,65982464}, {free_memory,27328512}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6591716,1}}, {context_switches,{1830511,0}}, {garbage_collection,{973535,1384064840,0}}, {io,{{input,28571095},{output,71817799}}}, {reductions,{384974754,617384}}, {run_queue,0}, {runtime,{83970,190}}]}]}] [stats:error] [2012-03-26 2:52:59] [ns_1@127.0.0.1:<0.31391.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:52:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31442.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:52:59] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31461.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:00] [ns_1@127.0.0.1:<0.31382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:00] [ns_1@127.0.0.1:<0.31453.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31250.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31468.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:01] [ns_1@127.0.0.1:<0.31414.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:01] [ns_1@127.0.0.1:<0.31403.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:02] [ns_1@127.0.0.1:<0.31395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:02] [ns_1@127.0.0.1:<0.31469.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31461.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:02] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31480.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:03] [ns_1@127.0.0.1:<0.31424.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:03] [ns_1@127.0.0.1:<0.31418.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:04] [ns_1@127.0.0.1:<0.31408.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31480.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31495.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:06] 
[ns_1@127.0.0.1:<0.31433.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:07] [ns_1@127.0.0.1:<0.31481.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:07] [ns_1@127.0.0.1:<0.31445.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:08] [ns_1@127.0.0.1:<0.31449.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:08] [ns_1@127.0.0.1:<0.31439.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31495.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:08] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31510.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:09] [ns_1@127.0.0.1:<0.31501.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:09] [ns_1@127.0.0.1:<0.31458.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31271.1>} [stats:error] [2012-03-26 2:53:10] [ns_1@127.0.0.1:<0.31464.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:10] [ns_1@127.0.0.1:<0.31451.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:11] [ns_1@127.0.0.1:<0.31513.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:11] [ns_1@127.0.0.1:<0.31473.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31510.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31532.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:53:12] [ns_1@127.0.0.1:<0.31475.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:12] [ns_1@127.0.0.1:<0.31466.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:13] [ns_1@127.0.0.1:<0.31527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:13] [ns_1@127.0.0.1:<0.31485.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:14] [ns_1@127.0.0.1:<0.31487.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:14] [ns_1@127.0.0.1:<0.31477.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31532.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:14] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31548.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:15] [ns_1@127.0.0.1:<0.31539.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:15] [ns_1@127.0.0.1:<0.31431.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:16] [ns_1@127.0.0.1:<0.31420.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:16] [ns_1@127.0.0.1:<0.31499.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:17] [ns_1@127.0.0.1:<0.31553.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:17] [ns_1@127.0.0.1:<0.31505.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31548.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:17] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31568.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 
2:53:18] [ns_1@127.0.0.1:<0.31497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:18] [ns_1@127.0.0.1:<0.31511.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:19] [ns_1@127.0.0.1:<0.31563.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:19] [ns_1@127.0.0.1:<0.31521.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31271.1>} [stats:error] [2012-03-26 2:53:20] [ns_1@127.0.0.1:<0.31507.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:20] [ns_1@127.0.0.1:<0.31525.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31568.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:20] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31587.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:21] [ns_1@127.0.0.1:<0.31575.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:21] [ns_1@127.0.0.1:<0.31533.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:22] [ns_1@127.0.0.1:<0.31523.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:22] [ns_1@127.0.0.1:<0.31537.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:23] [ns_1@127.0.0.1:<0.31590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:23] [ns_1@127.0.0.1:<0.31543.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31587.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31605.1>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:24] [ns_1@127.0.0.1:<0.31535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:24] [ns_1@127.0.0.1:<0.31551.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:25] [ns_1@127.0.0.1:<0.31600.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:25] [ns_1@127.0.0.1:<0.31557.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:26] [ns_1@127.0.0.1:<0.31545.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:26] [ns_1@127.0.0.1:<0.31561.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31605.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31623.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:27] [ns_1@127.0.0.1:<0.31616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:27] [ns_1@127.0.0.1:<0.31569.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:28] [ns_1@127.0.0.1:<0.31559.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:28] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:53:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31271.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:53:28] [ns_1@127.0.0.1:<0.31573.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:29] [ns_1@127.0.0.1:<0.31628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:29] [ns_1@127.0.0.1:<0.31582.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31623.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:29] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31644.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:30] [ns_1@127.0.0.1:<0.31571.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:30] [ns_1@127.0.0.1:<0.31588.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:31] [ns_1@127.0.0.1:<0.31641.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:31] [ns_1@127.0.0.1:<0.31594.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:32] [ns_1@127.0.0.1:<0.31584.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:32] [ns_1@127.0.0.1:<0.31598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31644.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31662.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:33] [ns_1@127.0.0.1:<0.31655.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:33] [ns_1@127.0.0.1:<0.31606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:34] [ns_1@127.0.0.1:<0.31596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:34] [ns_1@127.0.0.1:<0.31608.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:34] [ns_1@127.0.0.1:<0.31620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:34] [ns_1@127.0.0.1:<0.31632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:34] [ns_1@127.0.0.1:<0.31649.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:34] [ns_1@127.0.0.1:<0.31638.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:34] [ns_1@127.0.0.1:<0.31612.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:35] [ns_1@127.0.0.1:<0.31667.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31662.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:35] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31690.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:36] [ns_1@127.0.0.1:<0.31618.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31468.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31693.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:36] [ns_1@127.0.0.1:<0.31659.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:37] [ns_1@127.0.0.1:<0.31624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:37] [ns_1@127.0.0.1:<0.31691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:38] [ns_1@127.0.0.1:<0.31630.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:38] [ns_1@127.0.0.1:<0.31679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:38] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31690.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31707.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:39] [ns_1@127.0.0.1:<0.31634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:39] [ns_1@127.0.0.1:<0.31702.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31638.1>} [stats:error] [2012-03-26 2:53:40] [ns_1@127.0.0.1:<0.31647.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:40] [ns_1@127.0.0.1:<0.31696.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:40] [ns_1@127.0.0.1:<0.31638.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:41] [ns_1@127.0.0.1:<0.31651.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:41] [ns_1@127.0.0.1:<0.31716.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31707.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31729.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:42] [ns_1@127.0.0.1:<0.31657.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:42] [ns_1@127.0.0.1:<0.31708.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:43] [ns_1@127.0.0.1:<0.31663.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:43] [ns_1@127.0.0.1:<0.31730.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:53:44] [ns_1@127.0.0.1:<0.31669.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:44] [ns_1@127.0.0.1:<0.31721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31729.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:44] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31745.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:45] [ns_1@127.0.0.1:<0.31684.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:45] [ns_1@127.0.0.1:<0.31740.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:46] [ns_1@127.0.0.1:<0.31671.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:46] [ns_1@127.0.0.1:<0.31734.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:46] [ns_1@127.0.0.1:<0.31638.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:47] [ns_1@127.0.0.1:<0.31698.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:47] [ns_1@127.0.0.1:<0.31754.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31745.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31766.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:48] [ns_1@127.0.0.1:<0.31673.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:48] [ns_1@127.0.0.1:<0.31748.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:49] [ns_1@127.0.0.1:<0.31710.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:53:49] [ns_1@127.0.0.1:<0.31767.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31638.1>} [stats:error] [2012-03-26 2:53:50] [ns_1@127.0.0.1:<0.31675.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:50] [ns_1@127.0.0.1:<0.31758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31766.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:50] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31785.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:51] [ns_1@127.0.0.1:<0.31723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:51] [ns_1@127.0.0.1:<0.31780.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:52] [ns_1@127.0.0.1:<0.31677.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:52] [ns_1@127.0.0.1:<0.31771.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:52] [ns_1@127.0.0.1:<0.31638.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:53] [ns_1@127.0.0.1:<0.31736.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:53] [ns_1@127.0.0.1:<0.31792.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:53] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31785.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31804.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:54] [ns_1@127.0.0.1:<0.31694.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:53:54] [ns_1@127.0.0.1:<0.31786.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:55] [ns_1@127.0.0.1:<0.31750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:55] [ns_1@127.0.0.1:<0.31805.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:56] [ns_1@127.0.0.1:<0.31704.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:56] [ns_1@127.0.0.1:<0.31796.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:53:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31804.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:53:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31822.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:53:57] [ns_1@127.0.0.1:<0.31760.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:57] [ns_1@127.0.0.1:<0.31817.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:58] [ns_1@127.0.0.1:<0.31718.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:58] [ns_1@127.0.0.1:<0.31811.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:53:59] [ns_1@127.0.0.1:<0.31773.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:53:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31638.1>} [ns_doctor:info] [2012-03-26 2:53:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755635,16461}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38453608}, {processes,10674936}, {processes_used,9066672}, {system,27778672}, {atom,1306681}, {atom_used,1284164}, {binary,573664}, {code,12859877}, {ets,2442336}]}, {system_stats, [{cpu_utilization_rate,25.56390977443609}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, 
{mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6656}, {memory_data,{4040077312,4012621824,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26316 kB\nBuffers: 64512 kB\nCached: 3525352 kB\nSwapCached: 0 kB\nActive: 312680 kB\nInactive: 3437892 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26316 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 60 kB\nWriteback: 0 kB\nAnonPages: 160712 kB\nMapped: 24872 kB\nSlab: 134324 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 580076 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3609960448}, {buffered_memory,66060288}, {free_memory,26947584}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6651753,1}}, {context_switches,{1842536,0}}, {garbage_collection,{979791,1394420265,0}}, {io,{{input,28581065},{output,72184829}}}, {reductions,{387325289,618688}}, {run_queue,0}, {runtime,{84700,220}}]}]}] [stats:error] [2012-03-26 2:53:59] [ns_1@127.0.0.1:<0.31829.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31822.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31856.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:00] [ns_1@127.0.0.1:<0.31732.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:00] [ns_1@127.0.0.1:<0.31823.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:01] [ns_1@127.0.0.1:<0.31788.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:01] [ns_1@127.0.0.1:<0.31859.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:02] [ns_1@127.0.0.1:<0.31742.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:02] [ns_1@127.0.0.1:<0.31833.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31856.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:05] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31874.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:54:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31874.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31883.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:06] [ns_1@127.0.0.1:<0.31869.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:06] [ns_1@127.0.0.1:<0.31756.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:07] [ns_1@127.0.0.1:<0.31863.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:07] [ns_1@127.0.0.1:<0.31800.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:08] [ns_1@127.0.0.1:<0.31886.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:08] [ns_1@127.0.0.1:<0.31769.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31883.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31899.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:09] [ns_1@127.0.0.1:<0.31890.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:09] [ns_1@127.0.0.1:<0.31815.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31638.1>} [stats:error] [2012-03-26 2:54:10] [ns_1@127.0.0.1:<0.31896.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:10] [ns_1@127.0.0.1:<0.31782.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31693.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31914.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:11] [ns_1@127.0.0.1:<0.31902.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:11] [ns_1@127.0.0.1:<0.31827.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31899.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31922.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:54:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31922.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31926.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-03-26 2:54:16] [ns_1@127.0.0.1:<0.31884.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:16] [ns_1@127.0.0.1:<0.31912.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:17] [ns_1@127.0.0.1:<0.31888.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:17] [ns_1@127.0.0.1:<0.31917.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31926.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31943.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:18] [ns_1@127.0.0.1:<0.31894.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:18] [ns_1@127.0.0.1:<0.31794.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:19] [ns_1@127.0.0.1:<0.31900.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:19] [ns_1@127.0.0.1:<0.31853.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.31638.1>} [stats:error] [2012-03-26 2:54:20] [ns_1@127.0.0.1:<0.31910.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:20] [ns_1@127.0.0.1:<0.31807.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31943.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31962.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:21] [ns_1@127.0.0.1:<0.31938.1>:stats_reader:log_bad_responses:191] Some 
nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:21] [ns_1@127.0.0.1:<0.31867.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:22] [ns_1@127.0.0.1:<0.31934.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:22] [ns_1@127.0.0.1:<0.31819.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:23] [ns_1@127.0.0.1:<0.31950.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:23] [ns_1@127.0.0.1:<0.31931.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31962.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31980.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:24] [ns_1@127.0.0.1:<0.31946.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:24] [ns_1@127.0.0.1:<0.31831.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:25] [ns_1@127.0.0.1:<0.31965.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:25] [ns_1@127.0.0.1:<0.31944.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:26] [ns_1@127.0.0.1:<0.31959.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:26] [ns_1@127.0.0.1:<0.31861.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31980.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.31998.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:27] [ns_1@127.0.0.1:<0.31975.1>:stats_reader:log_bad_responses:191] Some nodes didn't 
respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:27] [ns_1@127.0.0.1:<0.31957.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:28] [ns_1@127.0.0.1:<0.31971.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:28] [ns_1@127.0.0.1:<0.31871.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:28] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:54:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.31638.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:54:29] [ns_1@127.0.0.1:<0.31989.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:29] [ns_1@127.0.0.1:<0.31969.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.31998.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32019.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:30] [ns_1@127.0.0.1:<0.31983.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:30] [ns_1@127.0.0.1:<0.31936.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:31] [ns_1@127.0.0.1:<0.32001.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:31] [ns_1@127.0.0.1:<0.31981.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:32] [ns_1@127.0.0.1:<0.31995.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:32] 
[ns_1@127.0.0.1:<0.31948.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32019.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32037.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:33] [ns_1@127.0.0.1:<0.32011.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:33] [ns_1@127.0.0.1:<0.31993.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:34] [ns_1@127.0.0.1:<0.32007.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:34] [ns_1@127.0.0.1:<0.32015.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:34] [ns_1@127.0.0.1:<0.31915.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:34] [ns_1@127.0.0.1:<0.31963.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:34] [ns_1@127.0.0.1:<0.31999.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:34] [ns_1@127.0.0.1:<0.32009.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:34] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 2:54:34] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 2:54:34] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:54:34] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:54:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:54:34] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [user:info] [2012-03-26 2:54:36] [ns_1@127.0.0.1:<0.32072.1>:menelaus_web_alerts_srv:global_alert:64] Approaching full disk warning. Usage of disk "/" on node "127.0.0.1" is around 100%. [ns_server:warn] [2012-03-26 2:54:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
[ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:<0.32026.1>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:<0.32044.1>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [stats:error] [2012-03-26 2:54:39] [ns_1@127.0.0.1:<0.32024.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 2:54:39] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 2:54:40] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 02:54:41: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 2:54:40] [ns_1@127.0.0.1:<0.32034.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:54:40] [ns_1@127.0.0.1:<0.32038.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:41] [ns_1@127.0.0.1:<0.32028.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:41] [ns_1@127.0.0.1:<0.32098.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32037.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32109.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:42] [ns_1@127.0.0.1:<0.32046.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:42] [ns_1@127.0.0.1:<0.32102.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:43] [ns_1@127.0.0.1:<0.32042.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:43] [ns_1@127.0.0.1:<0.32110.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:44] [ns_1@127.0.0.1:<0.32048.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:44] [ns_1@127.0.0.1:<0.32087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:44] [ns_1@127.0.0.1:<0.32114.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32109.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32126.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:45] [ns_1@127.0.0.1:<0.32080.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:45] [ns_1@127.0.0.1:<0.32121.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 
2:54:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.31914.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32136.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:46] [ns_1@127.0.0.1:<0.32050.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:47] [ns_1@127.0.0.1:<0.32129.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:47] [ns_1@127.0.0.1:<0.32081.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32126.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32146.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:48] [ns_1@127.0.0.1:<0.32137.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:48] [ns_1@127.0.0.1:<0.32053.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:49] [ns_1@127.0.0.1:<0.32141.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:49] [ns_1@127.0.0.1:<0.32082.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32087.1>} [stats:error] [2012-03-26 2:54:50] [ns_1@127.0.0.1:<0.32149.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:50] [ns_1@127.0.0.1:<0.32056.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:50] [ns_1@127.0.0.1:<0.32087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} 
Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32146.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32165.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:51] [ns_1@127.0.0.1:<0.32153.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:51] [ns_1@127.0.0.1:<0.32083.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:52] [ns_1@127.0.0.1:<0.32162.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:52] [ns_1@127.0.0.1:<0.32058.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:53] [ns_1@127.0.0.1:<0.32169.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:53] [ns_1@127.0.0.1:<0.32091.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32165.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32185.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:54] [ns_1@127.0.0.1:<0.32176.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:54] [ns_1@127.0.0.1:<0.32060.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:55] [ns_1@127.0.0.1:<0.32180.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:55] [ns_1@127.0.0.1:<0.32106.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:56] [ns_1@127.0.0.1:<0.32188.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:56] [ns_1@127.0.0.1:<0.32100.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:56] [ns_1@127.0.0.1:<0.32087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:54:57] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32185.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:54:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32203.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:54:57] [ns_1@127.0.0.1:<0.32194.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:57] [ns_1@127.0.0.1:<0.32119.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:58] [ns_1@127.0.0.1:<0.32200.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:58] [ns_1@127.0.0.1:<0.32112.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:59] [ns_1@127.0.0.1:<0.32206.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:54:59] [ns_1@127.0.0.1:<0.32134.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:54:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32087.1>} [ns_doctor:info] [2012-03-26 2:54:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755695,48384}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38454840}, {processes,10611104}, {processes_used,9002840}, {system,27843736}, {atom,1306681}, {atom_used,1284164}, {binary,604904}, {code,12859877}, {ets,2473440}]}, {system_stats, [{cpu_utilization_rate,25.75}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6716}, {memory_data,{4040077312,4013129728,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 26068 kB\nBuffers: 64648 kB\nCached: 3525488 kB\nSwapCached: 0 kB\nActive: 312780 kB\nInactive: 3438076 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 26068 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 160724 kB\nMapped: 24872 kB\nSlab: 134404 
kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579776 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3610099712}, {buffered_memory,66199552}, {free_memory,26693632}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6711786,1}}, {context_switches,{1854915,0}}, {garbage_collection,{986268,1404856444,0}}, {io,{{input,28853905},{output,72829630}}}, {reductions,{389693109,578942}}, {run_queue,0}, {runtime,{85440,190}}]}]}] [error_logger:error] [2012-03-26 2:55:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32203.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32223.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:00] [ns_1@127.0.0.1:<0.32213.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:00] [ns_1@127.0.0.1:<0.32123.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:01] [ns_1@127.0.0.1:<0.32217.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:01] [ns_1@127.0.0.1:<0.32147.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:02] [ns_1@127.0.0.1:<0.32228.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:02] [ns_1@127.0.0.1:<0.32139.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:02] [ns_1@127.0.0.1:<0.32087.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32223.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32241.1>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:03] [ns_1@127.0.0.1:<0.32232.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:03] [ns_1@127.0.0.1:<0.32160.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:04] [ns_1@127.0.0.1:<0.32238.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:04] [ns_1@127.0.0.1:<0.32151.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:05] [ns_1@127.0.0.1:<0.32244.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:05] [ns_1@127.0.0.1:<0.32174.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32241.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32263.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:06] [ns_1@127.0.0.1:<0.32251.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:06] [ns_1@127.0.0.1:<0.32167.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:07] [ns_1@127.0.0.1:<0.32260.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:07] [ns_1@127.0.0.1:<0.32186.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:08] [ns_1@127.0.0.1:<0.32266.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:08] [ns_1@127.0.0.1:<0.32178.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32263.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32279.1>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:09] [ns_1@127.0.0.1:<0.32272.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32087.1>} [stats:error] [2012-03-26 2:55:09] [ns_1@127.0.0.1:<0.32198.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:10] [ns_1@127.0.0.1:<0.32276.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:10] [ns_1@127.0.0.1:<0.32192.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:11] [ns_1@127.0.0.1:<0.32286.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:11] [ns_1@127.0.0.1:<0.32211.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32279.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32301.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:12] [ns_1@127.0.0.1:<0.32292.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:12] [ns_1@127.0.0.1:<0.32204.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:13] [ns_1@127.0.0.1:<0.32298.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:13] [ns_1@127.0.0.1:<0.32226.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:14] [ns_1@127.0.0.1:<0.32304.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:14] [ns_1@127.0.0.1:<0.32215.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32301.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:15] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32317.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:15] [ns_1@127.0.0.1:<0.32310.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:15] [ns_1@127.0.0.1:<0.32236.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:16] [ns_1@127.0.0.1:<0.32314.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:16] [ns_1@127.0.0.1:<0.32230.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:17] [ns_1@127.0.0.1:<0.32324.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:17] [ns_1@127.0.0.1:<0.32249.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32317.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32337.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:18] [ns_1@127.0.0.1:<0.32328.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:18] [ns_1@127.0.0.1:<0.32242.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:19] [ns_1@127.0.0.1:<0.32334.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32087.1>} [stats:error] [2012-03-26 2:55:20] [ns_1@127.0.0.1:<0.32264.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:20] [ns_1@127.0.0.1:<0.32340.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32337.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] 
[2012-03-26 2:55:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32354.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:21] [ns_1@127.0.0.1:<0.32256.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32136.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32357.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:21] [ns_1@127.0.0.1:<0.32346.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:22] [ns_1@127.0.0.1:<0.32274.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:22] [ns_1@127.0.0.1:<0.32355.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:23] [ns_1@127.0.0.1:<0.32268.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:23] [ns_1@127.0.0.1:<0.32362.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32354.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32373.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:24] [ns_1@127.0.0.1:<0.32290.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:24] [ns_1@127.0.0.1:<0.32366.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:25] [ns_1@127.0.0.1:<0.32280.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:25] [ns_1@127.0.0.1:<0.32374.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:55:26] [ns_1@127.0.0.1:<0.32302.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:26] [ns_1@127.0.0.1:<0.32380.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32373.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32392.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:27] [ns_1@127.0.0.1:<0.32294.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:27] [ns_1@127.0.0.1:<0.32387.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:28] [ns_1@127.0.0.1:<0.32312.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:28] [ns_1@127.0.0.1:<0.32393.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32087.1>} [error_logger:error] [2012-03-26 2:55:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32392.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32406.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:32] [ns_1@127.0.0.1:<0.32399.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:32] [ns_1@127.0.0.1:<0.32351.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:33] [ns_1@127.0.0.1:<0.32306.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, 
{gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32406.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32418.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:33] [ns_1@127.0.0.1:<0.32358.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:34] [ns_1@127.0.0.1:<0.32326.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:34] [ns_1@127.0.0.1:<0.32364.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:35] [ns_1@127.0.0.1:<0.32320.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:35] [ns_1@127.0.0.1:<0.32368.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32418.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32437.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:36] [ns_1@127.0.0.1:<0.32338.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:36] [ns_1@127.0.0.1:<0.32376.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:37] [ns_1@127.0.0.1:<0.32330.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:37] [ns_1@127.0.0.1:<0.32382.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:38] [ns_1@127.0.0.1:<0.32413.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:38] [ns_1@127.0.0.1:<0.32389.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:38] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:55:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH 
REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32087.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [error_logger:error] [2012-03-26 2:55:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32437.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32453.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:39] [ns_1@127.0.0.1:<0.32342.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:39] [ns_1@127.0.0.1:<0.32395.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:40] [ns_1@127.0.0.1:<0.32425.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32453.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32470.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:55:44] [ns_1@127.0.0.1:<0.32460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32470.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32474.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:55:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32474.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32483.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 2:55:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32460.1>} [stats:error] [2012-03-26 2:55:50] [ns_1@127.0.0.1:<0.32440.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:50] [ns_1@127.0.0.1:<0.32401.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:50] [ns_1@127.0.0.1:<0.32460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32483.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32494.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:51] [ns_1@127.0.0.1:<0.32419.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:51] [ns_1@127.0.0.1:<0.32411.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:52] [ns_1@127.0.0.1:<0.32450.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:52] [ns_1@127.0.0.1:<0.32415.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:53] [ns_1@127.0.0.1:<0.32432.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:53] [ns_1@127.0.0.1:<0.32423.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32494.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32513.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:54] [ns_1@127.0.0.1:<0.32491.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:54] [ns_1@127.0.0.1:<0.32427.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:55] [ns_1@127.0.0.1:<0.32444.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:55] [ns_1@127.0.0.1:<0.32438.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32357.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32526.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:56] [ns_1@127.0.0.1:<0.32504.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:56] [ns_1@127.0.0.1:<0.32442.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:56] [ns_1@127.0.0.1:<0.32460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:55:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32513.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:55:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32532.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:55:57] [ns_1@127.0.0.1:<0.32456.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:57] [ns_1@127.0.0.1:<0.32448.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:58] [ns_1@127.0.0.1:<0.32516.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:58] [ns_1@127.0.0.1:<0.32454.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:55:59] [ns_1@127.0.0.1:<0.32497.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:55:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32460.1>} [ns_doctor:info] [2012-03-26 2:55:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755755,80408}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38353200}, {processes,10540472}, {processes_used,8932208}, {system,27812728}, {atom,1306681}, {atom_used,1284164}, {binary,599616}, {code,12859877}, {ets,2444152}]}, {system_stats, [{cpu_utilization_rate,25.897435897435898}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6776}, {memory_data,{4040077312,4013383680,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25820 kB\nBuffers: 64720 kB\nCached: 3525632 kB\nSwapCached: 0 kB\nActive: 312852 kB\nInactive: 3438232 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25820 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 76 kB\nWriteback: 0 kB\nAnonPages: 160728 kB\nMapped: 24872 kB\nSlab: 134292 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579776 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3610247168}, {buffered_memory,66273280}, {free_memory,26439680}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6771817,1}}, {context_switches,{1865967,0}}, {garbage_collection,{991890,1413619579,0}}, 
{io,{{input,28863857},{output,73154660}}}, {reductions,{391756870,369431}}, {run_queue,0}, {runtime,{86100,140}}]}]}] [stats:error] [2012-03-26 2:55:59] [ns_1@127.0.0.1:<0.32465.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32532.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32552.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:00] [ns_1@127.0.0.1:<0.32529.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:00] [ns_1@127.0.0.1:<0.32495.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:01] [ns_1@127.0.0.1:<0.32508.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:01] [ns_1@127.0.0.1:<0.32489.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:02] [ns_1@127.0.0.1:<0.32542.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:02] [ns_1@127.0.0.1:<0.32460.1>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:02] [ns_1@127.0.0.1:<0.32506.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32552.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32571.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:03] [ns_1@127.0.0.1:<0.32522.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:03] [ns_1@127.0.0.1:<0.32502.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:04] [ns_1@127.0.0.1:<0.32557.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:04] 
[ns_1@127.0.0.1:<0.32520.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:05] [ns_1@127.0.0.1:<0.32535.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:05] [ns_1@127.0.0.1:<0.32514.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32571.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32593.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:06] [ns_1@127.0.0.1:<0.32568.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:06] [ns_1@127.0.0.1:<0.32533.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:07] [ns_1@127.0.0.1:<0.32549.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:07] [ns_1@127.0.0.1:<0.32527.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:08] [ns_1@127.0.0.1:<0.32581.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:08] [ns_1@127.0.0.1:<0.32544.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32593.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32609.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:09] [ns_1@127.0.0.1:<0.32564.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32460.1>} [stats:error] [2012-03-26 2:56:09] [ns_1@127.0.0.1:<0.32540.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:56:10] [ns_1@127.0.0.1:<0.32596.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:10] [ns_1@127.0.0.1:<0.32560.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:11] [ns_1@127.0.0.1:<0.32577.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:11] [ns_1@127.0.0.1:<0.32555.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32609.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32631.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:12] [ns_1@127.0.0.1:<0.32606.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:13] [ns_1@127.0.0.1:<0.32572.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:13] [ns_1@127.0.0.1:<0.32590.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:14] [ns_1@127.0.0.1:<0.32566.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:14] [ns_1@127.0.0.1:<0.32622.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:15] [ns_1@127.0.0.1:<0.32586.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32631.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32647.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:15] [ns_1@127.0.0.1:<0.32602.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:16] [ns_1@127.0.0.1:<0.32579.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:56:16] [ns_1@127.0.0.1:<0.32634.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:17] [ns_1@127.0.0.1:<0.32598.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:17] [ns_1@127.0.0.1:<0.32616.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32647.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32665.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:18] [ns_1@127.0.0.1:<0.32594.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:18] [ns_1@127.0.0.1:<0.32644.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:19] [ns_1@127.0.0.1:<0.32610.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:19] [ns_1@127.0.0.1:<0.32628.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32460.1>} [stats:error] [2012-03-26 2:56:20] [ns_1@127.0.0.1:<0.32604.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:20] [ns_1@127.0.0.1:<0.32658.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32665.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32684.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:21] [ns_1@127.0.0.1:<0.32624.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:21] [ns_1@127.0.0.1:<0.32640.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:56:22] [ns_1@127.0.0.1:<0.32620.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:22] [ns_1@127.0.0.1:<0.32670.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:23] [ns_1@127.0.0.1:<0.32636.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:23] [ns_1@127.0.0.1:<0.32654.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32684.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32702.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:24] [ns_1@127.0.0.1:<0.32632.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:24] [ns_1@127.0.0.1:<0.32685.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:25] [ns_1@127.0.0.1:<0.32650.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:25] [ns_1@127.0.0.1:<0.32666.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:26] [ns_1@127.0.0.1:<0.32642.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:26] [ns_1@127.0.0.1:<0.32695.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32702.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32720.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:27] [ns_1@127.0.0.1:<0.32660.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:27] [ns_1@127.0.0.1:<0.32679.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:56:28] [ns_1@127.0.0.1:<0.32656.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:28] [ns_1@127.0.0.1:<0.32709.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:29] [ns_1@127.0.0.1:<0.32672.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32460.1>} [stats:error] [2012-03-26 2:56:29] [ns_1@127.0.0.1:<0.32691.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32720.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32738.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:30] [ns_1@127.0.0.1:<0.32668.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:30] [ns_1@127.0.0.1:<0.32721.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32526.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32745.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:31] [ns_1@127.0.0.1:<0.32687.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:31] [ns_1@127.0.0.1:<0.32703.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:32] [ns_1@127.0.0.1:<0.32681.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:32] [ns_1@127.0.0.1:<0.32731.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: 
{local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32738.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.32757.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:33] [ns_1@127.0.0.1:<0.32697.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:33] [ns_1@127.0.0.1:<0.32715.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:34] [ns_1@127.0.0.1:<0.32693.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:34] [ns_1@127.0.0.1:<0.32746.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:35] [ns_1@127.0.0.1:<0.32711.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:35] [ns_1@127.0.0.1:<0.32727.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.32757.1>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.11.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:36] [ns_1@127.0.0.1:<0.32705.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:36] [ns_1@127.0.0.1:<0.32758.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:37] [ns_1@127.0.0.1:<0.32723.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:37] [ns_1@127.0.0.1:<0.32741.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:38] [ns_1@127.0.0.1:<0.32717.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:38] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:56:38] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32460.1> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:56:38] [ns_1@127.0.0.1:<0.3.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.11.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.27.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:39] [ns_1@127.0.0.1:<0.32735.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:39] [ns_1@127.0.0.1:<0.32752.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:40] [ns_1@127.0.0.1:<0.32729.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:40] [ns_1@127.0.0.1:<0.16.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:41] [ns_1@127.0.0.1:<0.32750.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:42] [ns_1@127.0.0.1:<0.32764.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.27.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.50.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:42] [ns_1@127.0.0.1:<0.32743.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:43] [ns_1@127.0.0.1:<0.28.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:43] [ns_1@127.0.0.1:<0.32762.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:44] [ns_1@127.0.0.1:<0.12.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:44] [ns_1@127.0.0.1:<0.32754.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:44] [ns_1@127.0.0.1:<0.32.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:45] [ns_1@127.0.0.1:<0.43.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.50.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.66.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:45] [ns_1@127.0.0.1:<0.7.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:46] [ns_1@127.0.0.1:<0.22.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:46] [ns_1@127.0.0.1:<0.32766.1>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:47] [ns_1@127.0.0.1:<0.55.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:47] [ns_1@127.0.0.1:<0.20.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.66.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.85.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] 
[2012-03-26 2:56:48] [ns_1@127.0.0.1:<0.39.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:48] [ns_1@127.0.0.1:<0.14.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:49] [ns_1@127.0.0.1:<0.69.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:49] [ns_1@127.0.0.1:<0.35.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32.2>} [stats:error] [2012-03-26 2:56:50] [ns_1@127.0.0.1:<0.51.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:50] [ns_1@127.0.0.1:<0.24.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:50] [ns_1@127.0.0.1:<0.41.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:50] [ns_1@127.0.0.1:<0.53.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:50] [ns_1@127.0.0.1:<0.32.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.85.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.108.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:51] [ns_1@127.0.0.1:<0.80.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:51] [ns_1@127.0.0.1:<0.47.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:52] [ns_1@127.0.0.1:<0.61.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:52] [ns_1@127.0.0.1:<0.63.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:53] [ns_1@127.0.0.1:<0.92.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:53] [ns_1@127.0.0.1:<0.59.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.108.2>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.127.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:54] [ns_1@127.0.0.1:<0.76.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:54] [ns_1@127.0.0.1:<0.78.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:55] [ns_1@127.0.0.1:<0.111.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:55] [ns_1@127.0.0.1:<0.74.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:56] [ns_1@127.0.0.1:<0.88.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:56] [ns_1@127.0.0.1:<0.90.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:56] [ns_1@127.0.0.1:<0.32.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:56:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.127.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:56:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.145.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:56:57] [ns_1@127.0.0.1:<0.122.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:57] [ns_1@127.0.0.1:<0.86.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:58] [ns_1@127.0.0.1:<0.101.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:58] [ns_1@127.0.0.1:<0.109.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:56:59] [ns_1@127.0.0.1:<0.136.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:56:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32.2>} [stats:error] [2012-03-26 2:56:59] [ns_1@127.0.0.1:<0.99.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 2:56:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current 
node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755815,110756}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38333928}, {processes,10485176}, {processes_used,8876912}, {system,27848752}, {atom,1306681}, {atom_used,1284164}, {binary,600384}, {code,12859877}, {ets,2473008}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6836}, {memory_data,{4040077312,4013510656,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25820 kB\nBuffers: 64792 kB\nCached: 3525804 kB\nSwapCached: 0 kB\nActive: 312924 kB\nInactive: 3438392 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25820 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 72 kB\nWriteback: 0 kB\nAnonPages: 160728 kB\nMapped: 24872 kB\nSlab: 134324 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 579776 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3610423296}, {buffered_memory,66347008}, {free_memory,26439680}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6831847,0}}, {context_switches,{1878083,0}}, {garbage_collection,{998141,1424135288,0}}, {io,{{input,28873836},{output,73523768}}}, {reductions,{394123680,613568}}, {run_queue,0}, {runtime,{86850,190}}]}]}] [error_logger:error] [2012-03-26 2:57:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.145.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.178.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:00] [ns_1@127.0.0.1:<0.103.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:00] [ns_1@127.0.0.1:<0.120.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 2:57:01] [ns_1@127.0.0.1:<0.148.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:01] [ns_1@127.0.0.1:<0.116.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:02] [ns_1@127.0.0.1:<0.105.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:02] [ns_1@127.0.0.1:<0.32.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:02] [ns_1@127.0.0.1:<0.134.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.178.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.196.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:03] [ns_1@127.0.0.1:<0.167.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:03] [ns_1@127.0.0.1:<0.128.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:04] [ns_1@127.0.0.1:<0.118.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:04] [ns_1@127.0.0.1:<0.146.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:05] [ns_1@127.0.0.1:<0.189.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:05] [ns_1@127.0.0.1:<0.140.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.32745.1>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.217.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:57:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.196.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.219.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:06] [ns_1@127.0.0.1:<0.130.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:06] [ns_1@127.0.0.1:<0.157.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:07] [ns_1@127.0.0.1:<0.202.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:07] [ns_1@127.0.0.1:<0.153.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:08] [ns_1@127.0.0.1:<0.142.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:08] [ns_1@127.0.0.1:<0.185.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.219.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.235.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:09] [ns_1@127.0.0.1:<0.215.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32.2>} [stats:error] [2012-03-26 2:57:10] [ns_1@127.0.0.1:<0.155.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:11] [ns_1@127.0.0.1:<0.197.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:11] [ns_1@127.0.0.1:<0.228.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:12] [ns_1@127.0.0.1:<0.181.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, 
{gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.235.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.256.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:12] [ns_1@127.0.0.1:<0.183.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:13] [ns_1@127.0.0.1:<0.211.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:13] [ns_1@127.0.0.1:<0.253.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:14] [ns_1@127.0.0.1:<0.193.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.256.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.268.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:15] [ns_1@127.0.0.1:<0.224.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:15] [ns_1@127.0.0.1:<0.191.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:16] [ns_1@127.0.0.1:<0.204.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:16] [ns_1@127.0.0.1:<0.206.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:17] [ns_1@127.0.0.1:<0.236.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:17] [ns_1@127.0.0.1:<0.277.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.268.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:18] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.288.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:18] [ns_1@127.0.0.1:<0.220.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:18] [ns_1@127.0.0.1:<0.222.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:19] [ns_1@127.0.0.1:<0.249.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32.2>} [stats:error] [2012-03-26 2:57:19] [ns_1@127.0.0.1:<0.289.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:20] [ns_1@127.0.0.1:<0.230.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:20] [ns_1@127.0.0.1:<0.232.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.288.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.307.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:21] [ns_1@127.0.0.1:<0.261.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:21] [ns_1@127.0.0.1:<0.302.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:22] [ns_1@127.0.0.1:<0.244.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:22] [ns_1@127.0.0.1:<0.246.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:23] [ns_1@127.0.0.1:<0.273.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:23] [ns_1@127.0.0.1:<0.314.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.307.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.325.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:24] [ns_1@127.0.0.1:<0.257.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:24] [ns_1@127.0.0.1:<0.259.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:25] [ns_1@127.0.0.1:<0.285.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:25] [ns_1@127.0.0.1:<0.326.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:26] [ns_1@127.0.0.1:<0.265.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:26] [ns_1@127.0.0.1:<0.271.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.325.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.343.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:27] [ns_1@127.0.0.1:<0.297.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:27] [ns_1@127.0.0.1:<0.338.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:28] [ns_1@127.0.0.1:<0.279.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:28] [ns_1@127.0.0.1:<0.281.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:29] [ns_1@127.0.0.1:<0.312.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.32.2>} [stats:error] [2012-03-26 2:57:29] [ns_1@127.0.0.1:<0.350.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, 
{gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.343.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.361.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:30] [ns_1@127.0.0.1:<0.291.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:30] [ns_1@127.0.0.1:<0.293.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:31] [ns_1@127.0.0.1:<0.322.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:32] [ns_1@127.0.0.1:<0.364.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:32] [ns_1@127.0.0.1:<0.304.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:33] [ns_1@127.0.0.1:<0.308.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.361.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.379.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:33] [ns_1@127.0.0.1:<0.336.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:34] [ns_1@127.0.0.1:<0.374.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:34] [ns_1@127.0.0.1:<0.316.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:35] [ns_1@127.0.0.1:<0.318.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:35] [ns_1@127.0.0.1:<0.348.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:36] [ns_1@127.0.0.1:<0.386.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, 
[{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.379.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.400.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:36] [ns_1@127.0.0.1:<0.328.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:37] [ns_1@127.0.0.1:<0.332.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:37] [ns_1@127.0.0.1:<0.358.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:38] [ns_1@127.0.0.1:<0.401.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:38] [ns_1@127.0.0.1:<0.340.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:38] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:57:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.32.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [error_logger:error] [2012-03-26 2:57:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.400.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.414.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:39] [ns_1@127.0.0.1:<0.344.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:39] 
[ns_1@127.0.0.1:<0.372.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:40] [ns_1@127.0.0.1:<0.411.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:40] [ns_1@127.0.0.1:<0.352.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.217.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.430.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 2:57:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.414.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:43] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.434.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:44] [ns_1@127.0.0.1:<0.397.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:44] [ns_1@127.0.0.1:<0.428.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:44] [ns_1@127.0.0.1:<0.421.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:45] [ns_1@127.0.0.1:<0.403.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.434.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.444.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, 
{shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:45] [ns_1@127.0.0.1:<0.405.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:46] [ns_1@127.0.0.1:<0.409.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:46] [ns_1@127.0.0.1:<0.366.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:47] [ns_1@127.0.0.1:<0.415.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:47] [ns_1@127.0.0.1:<0.417.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.444.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.464.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:48] [ns_1@127.0.0.1:<0.426.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:48] [ns_1@127.0.0.1:<0.376.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:49] [ns_1@127.0.0.1:<0.354.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:49] [ns_1@127.0.0.1:<0.384.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.421.2>} [stats:error] [2012-03-26 2:57:50] [ns_1@127.0.0.1:<0.439.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:50] [ns_1@127.0.0.1:<0.388.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:50] [ns_1@127.0.0.1:<0.441.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:50] [ns_1@127.0.0.1:<0.457.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:50] [ns_1@127.0.0.1:<0.421.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.464.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.487.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:51] [ns_1@127.0.0.1:<0.368.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:51] [ns_1@127.0.0.1:<0.437.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:52] [ns_1@127.0.0.1:<0.455.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:52] [ns_1@127.0.0.1:<0.469.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:53] [ns_1@127.0.0.1:<0.380.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:53] [ns_1@127.0.0.1:<0.453.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.487.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.506.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:54] [ns_1@127.0.0.1:<0.467.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:54] [ns_1@127.0.0.1:<0.488.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:55] [ns_1@127.0.0.1:<0.393.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:55] [ns_1@127.0.0.1:<0.465.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:56] [ns_1@127.0.0.1:<0.480.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:56] [ns_1@127.0.0.1:<0.499.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:56] [ns_1@127.0.0.1:<0.421.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:57:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, 
[{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.506.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:57:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.524.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:57:57] [ns_1@127.0.0.1:<0.447.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:57] [ns_1@127.0.0.1:<0.478.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:58] [ns_1@127.0.0.1:<0.482.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:58] [ns_1@127.0.0.1:<0.513.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:57:59] [ns_1@127.0.0.1:<0.459.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:57:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.421.2>} [ns_doctor:info] [2012-03-26 2:57:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755875,142346}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38324208}, {processes,10493064}, {processes_used,8884800}, {system,27831144}, {atom,1306681}, {atom_used,1284164}, {binary,603584}, {code,12859877}, {ets,2446200}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6896}, {memory_data,{4040077312,4013637632,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25572 kB\nBuffers: 64892 kB\nCached: 3525528 kB\nSwapCached: 0 kB\nActive: 313016 kB\nInactive: 3438140 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25572 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 8 kB\nWriteback: 0 kB\nAnonPages: 160752 kB\nMapped: 24872 kB\nSlab: 134280 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582464 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, 
{cached_memory,3610140672}, {buffered_memory,66449408}, {free_memory,26185728}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6891878,1}}, {context_switches,{1890591,0}}, {garbage_collection,{1005101,1434552705,0}}, {io,{{input,29129583},{output,74443615}}}, {reductions,{396886730,548705}}, {run_queue,0}, {runtime,{87530,130}}]}]}] [stats:error] [2012-03-26 2:57:59] [ns_1@127.0.0.1:<0.495.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.524.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.544.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:00] [ns_1@127.0.0.1:<0.484.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:00] [ns_1@127.0.0.1:<0.525.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:01] [ns_1@127.0.0.1:<0.471.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:01] [ns_1@127.0.0.1:<0.507.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:02] [ns_1@127.0.0.1:<0.497.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:02] [ns_1@127.0.0.1:<0.421.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:02] [ns_1@127.0.0.1:<0.536.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.544.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.562.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:03] [ns_1@127.0.0.1:<0.490.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:58:03] [ns_1@127.0.0.1:<0.519.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:04] [ns_1@127.0.0.1:<0.509.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:04] [ns_1@127.0.0.1:<0.551.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:05] [ns_1@127.0.0.1:<0.501.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:05] [ns_1@127.0.0.1:<0.532.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.562.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.584.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:06] [ns_1@127.0.0.1:<0.521.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:06] [ns_1@127.0.0.1:<0.563.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:07] [ns_1@127.0.0.1:<0.515.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:07] [ns_1@127.0.0.1:<0.547.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:08] [ns_1@127.0.0.1:<0.534.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:08] [ns_1@127.0.0.1:<0.577.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.584.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.600.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:09] [ns_1@127.0.0.1:<0.530.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:09] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.421.2>} [stats:error] [2012-03-26 2:58:09] [ns_1@127.0.0.1:<0.557.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:10] [ns_1@127.0.0.1:<0.549.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:10] [ns_1@127.0.0.1:<0.589.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:11] [ns_1@127.0.0.1:<0.541.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:12] [ns_1@127.0.0.1:<0.570.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.600.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.622.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:12] [ns_1@127.0.0.1:<0.559.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:13] [ns_1@127.0.0.1:<0.601.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:13] [ns_1@127.0.0.1:<0.555.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:14] [ns_1@127.0.0.1:<0.585.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:14] [ns_1@127.0.0.1:<0.572.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:15] [ns_1@127.0.0.1:<0.615.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.622.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.638.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:15] 
[ns_1@127.0.0.1:<0.568.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.430.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.645.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:16] [ns_1@127.0.0.1:<0.595.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:16] [ns_1@127.0.0.1:<0.587.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:17] [ns_1@127.0.0.1:<0.627.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:17] [ns_1@127.0.0.1:<0.581.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.638.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.657.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:18] [ns_1@127.0.0.1:<0.611.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:18] [ns_1@127.0.0.1:<0.597.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:19] [ns_1@127.0.0.1:<0.641.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:19] [ns_1@127.0.0.1:<0.593.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.421.2>} [stats:error] [2012-03-26 2:58:20] [ns_1@127.0.0.1:<0.623.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:20] [ns_1@127.0.0.1:<0.613.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:21] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.657.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.677.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:21] [ns_1@127.0.0.1:<0.652.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:21] [ns_1@127.0.0.1:<0.607.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:22] [ns_1@127.0.0.1:<0.633.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:22] [ns_1@127.0.0.1:<0.625.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:23] [ns_1@127.0.0.1:<0.664.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:23] [ns_1@127.0.0.1:<0.619.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.677.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.695.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:24] [ns_1@127.0.0.1:<0.648.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:24] [ns_1@127.0.0.1:<0.635.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:25] [ns_1@127.0.0.1:<0.680.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:25] [ns_1@127.0.0.1:<0.631.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:26] [ns_1@127.0.0.1:<0.660.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:26] [ns_1@127.0.0.1:<0.650.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:27] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.695.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.713.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:27] [ns_1@127.0.0.1:<0.690.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:27] [ns_1@127.0.0.1:<0.646.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:28] [ns_1@127.0.0.1:<0.673.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:28] [ns_1@127.0.0.1:<0.662.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:29] [ns_1@127.0.0.1:<0.704.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.421.2>} [stats:error] [2012-03-26 2:58:29] [ns_1@127.0.0.1:<0.658.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.713.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.731.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:30] [ns_1@127.0.0.1:<0.686.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:30] [ns_1@127.0.0.1:<0.678.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:31] [ns_1@127.0.0.1:<0.716.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:31] [ns_1@127.0.0.1:<0.668.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:32] [ns_1@127.0.0.1:<0.698.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:32] 
[ns_1@127.0.0.1:<0.688.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.731.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.749.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:33] [ns_1@127.0.0.1:<0.728.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:33] [ns_1@127.0.0.1:<0.684.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:34] [ns_1@127.0.0.1:<0.710.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:34] [ns_1@127.0.0.1:<0.702.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:35] [ns_1@127.0.0.1:<0.742.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:35] [ns_1@127.0.0.1:<0.696.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.749.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.770.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:36] [ns_1@127.0.0.1:<0.722.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:36] [ns_1@127.0.0.1:<0.714.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:37] [ns_1@127.0.0.1:<0.754.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:37] [ns_1@127.0.0.1:<0.708.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:38] [ns_1@127.0.0.1:<0.736.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:38] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:58:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.421.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:58:38] [ns_1@127.0.0.1:<0.724.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.770.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.786.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:39] [ns_1@127.0.0.1:<0.767.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:39] [ns_1@127.0.0.1:<0.720.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:40] [ns_1@127.0.0.1:<0.746.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:40] [ns_1@127.0.0.1:<0.738.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:41] [ns_1@127.0.0.1:<0.779.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:41] [ns_1@127.0.0.1:<0.734.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.786.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:42] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.809.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:42] [ns_1@127.0.0.1:<0.758.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:42] [ns_1@127.0.0.1:<0.750.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:43] [ns_1@127.0.0.1:<0.794.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:43] [ns_1@127.0.0.1:<0.744.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:44] [ns_1@127.0.0.1:<0.773.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:44] [ns_1@127.0.0.1:<0.791.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:45] [ns_1@127.0.0.1:<0.763.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.809.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.825.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:45] [ns_1@127.0.0.1:<0.806.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:46] [ns_1@127.0.0.1:<0.756.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:46] [ns_1@127.0.0.1:<0.783.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:47] [ns_1@127.0.0.1:<0.775.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:47] [ns_1@127.0.0.1:<0.818.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:48] [ns_1@127.0.0.1:<0.771.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.825.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.846.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:48] [ns_1@127.0.0.1:<0.800.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:49] [ns_1@127.0.0.1:<0.787.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:49] [ns_1@127.0.0.1:<0.833.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.791.2>} [stats:error] [2012-03-26 2:58:50] [ns_1@127.0.0.1:<0.781.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:50] [ns_1@127.0.0.1:<0.812.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:50] [ns_1@127.0.0.1:<0.791.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.645.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.862.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:51] [ns_1@127.0.0.1:<0.802.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:51] [ns_1@127.0.0.1:<0.814.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.846.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.868.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:51] 
[ns_1@127.0.0.1:<0.828.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:51] [ns_1@127.0.0.1:<0.843.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:52] [ns_1@127.0.0.1:<0.798.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:52] [ns_1@127.0.0.1:<0.822.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:53] [ns_1@127.0.0.1:<0.839.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:53] [ns_1@127.0.0.1:<0.855.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.868.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.887.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:54] [ns_1@127.0.0.1:<0.810.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:54] [ns_1@127.0.0.1:<0.837.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:55] [ns_1@127.0.0.1:<0.851.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:55] [ns_1@127.0.0.1:<0.876.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:56] [ns_1@127.0.0.1:<0.820.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:56] [ns_1@127.0.0.1:<0.849.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:56] [ns_1@127.0.0.1:<0.791.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:58:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.887.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:58:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.906.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:58:57] [ns_1@127.0.0.1:<0.871.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:57] [ns_1@127.0.0.1:<0.888.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:58] [ns_1@127.0.0.1:<0.835.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:58] [ns_1@127.0.0.1:<0.863.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:59] [ns_1@127.0.0.1:<0.882.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:58:59] [ns_1@127.0.0.1:<0.901.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:58:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.791.2>} [ns_doctor:info] [2012-03-26 2:58:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755935,169441}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38533160}, {processes,10668880}, {processes_used,9059600}, {system,27864280}, {atom,1306681}, {atom_used,1284164}, {binary,601032}, {code,12859877}, {ets,2474976}]}, {system_stats, [{cpu_utilization_rate,25.5}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,6956}, {memory_data,{4040077312,4013891584,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25448 kB\nBuffers: 64968 kB\nCached: 3525692 kB\nSwapCached: 0 kB\nActive: 313492 kB\nInactive: 3438236 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25448 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 20 kB\nWriteback: 64 kB\nAnonPages: 160972 kB\nMapped: 24872 kB\nSlab: 134304 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582464 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3610308608}, {buffered_memory,66527232}, {free_memory,26058752}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{6951907,0}}, {context_switches,{1902814,0}}, {garbage_collection,{1011437,1445273024,0}}, {io,{{input,29139562},{output,74817303}}}, {reductions,{399285947,621011}}, {run_queue,0}, 
{runtime,{88230,180}}]}]}] [error_logger:error] [2012-03-26 2:59:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.906.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.926.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:00] [ns_1@127.0.0.1:<0.847.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:00] [ns_1@127.0.0.1:<0.865.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:01] [ns_1@127.0.0.1:<0.896.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:01] [ns_1@127.0.0.1:<0.914.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:02] [ns_1@127.0.0.1:<0.860.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:02] [ns_1@127.0.0.1:<0.791.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:02] [ns_1@127.0.0.1:<0.869.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.926.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.944.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:03] [ns_1@127.0.0.1:<0.909.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:03] [ns_1@127.0.0.1:<0.929.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:04] [ns_1@127.0.0.1:<0.878.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:04] [ns_1@127.0.0.1:<0.880.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:05] [ns_1@127.0.0.1:<0.921.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 2:59:05] [ns_1@127.0.0.1:<0.939.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.944.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.966.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:06] [ns_1@127.0.0.1:<0.890.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:06] [ns_1@127.0.0.1:<0.894.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:07] [ns_1@127.0.0.1:<0.937.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:07] [ns_1@127.0.0.1:<0.952.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:08] [ns_1@127.0.0.1:<0.903.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:08] [ns_1@127.0.0.1:<0.907.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.966.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.982.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:09] [ns_1@127.0.0.1:<0.950.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.791.2>} [stats:error] [2012-03-26 2:59:09] [ns_1@127.0.0.1:<0.967.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:10] [ns_1@127.0.0.1:<0.916.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:10] [ns_1@127.0.0.1:<0.918.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
2:59:11] [ns_1@127.0.0.1:<0.963.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:11] [ns_1@127.0.0.1:<0.977.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.982.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1004.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:12] [ns_1@127.0.0.1:<0.931.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:12] [ns_1@127.0.0.1:<0.933.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:13] [ns_1@127.0.0.1:<0.975.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:13] [ns_1@127.0.0.1:<0.993.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:14] [ns_1@127.0.0.1:<0.941.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:14] [ns_1@127.0.0.1:<0.945.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1004.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1020.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:15] [ns_1@127.0.0.1:<0.989.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:16] [ns_1@127.0.0.1:<0.1005.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:16] [ns_1@127.0.0.1:<0.954.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:17] [ns_1@127.0.0.1:<0.959.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:17] 
[ns_1@127.0.0.1:<0.1001.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:18] [ns_1@127.0.0.1:<0.1015.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1020.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1040.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:18] [ns_1@127.0.0.1:<0.969.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:19] [ns_1@127.0.0.1:<0.971.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:19] [ns_1@127.0.0.1:<0.1013.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.791.2>} [stats:error] [2012-03-26 2:59:20] [ns_1@127.0.0.1:<0.1029.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:20] [ns_1@127.0.0.1:<0.979.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:21] [ns_1@127.0.0.1:<0.983.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1040.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1059.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:21] [ns_1@127.0.0.1:<0.1027.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:22] [ns_1@127.0.0.1:<0.1041.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:22] [ns_1@127.0.0.1:<0.995.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:23] 
[ns_1@127.0.0.1:<0.997.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:23] [ns_1@127.0.0.1:<0.1037.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1059.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1075.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:24] [ns_1@127.0.0.1:<0.1054.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:24] [ns_1@127.0.0.1:<0.1007.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:25] [ns_1@127.0.0.1:<0.1009.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:25] [ns_1@127.0.0.1:<0.1049.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.862.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1088.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:26] [ns_1@127.0.0.1:<0.1066.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:26] [ns_1@127.0.0.1:<0.1017.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1075.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,menelaus_sup} started: [{pid,<0.1094.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:27] [ns_1@127.0.0.1:<0.1023.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:27] [ns_1@127.0.0.1:<0.1064.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:28] [ns_1@127.0.0.1:<0.1078.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:28] [ns_1@127.0.0.1:<0.1031.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:29] [ns_1@127.0.0.1:<0.1033.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:29] [ns_1@127.0.0.1:<0.1076.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.791.2>} [error_logger:error] [2012-03-26 2:59:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1094.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1112.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:30] [ns_1@127.0.0.1:<0.1091.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:30] [ns_1@127.0.0.1:<0.1043.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:31] [ns_1@127.0.0.1:<0.1045.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:31] [ns_1@127.0.0.1:<0.1089.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:32] [ns_1@127.0.0.1:<0.1103.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:32] [ns_1@127.0.0.1:<0.1056.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1112.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:33] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1131.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:33] [ns_1@127.0.0.1:<0.1060.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:33] [ns_1@127.0.0.1:<0.1101.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:34] [ns_1@127.0.0.1:<0.1117.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:34] [ns_1@127.0.0.1:<0.1068.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:35] [ns_1@127.0.0.1:<0.1070.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:35] [ns_1@127.0.0.1:<0.1115.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1131.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1152.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:36] [ns_1@127.0.0.1:<0.1128.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:36] [ns_1@127.0.0.1:<0.1080.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:37] [ns_1@127.0.0.1:<0.1084.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:37] [ns_1@127.0.0.1:<0.1126.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:38] [ns_1@127.0.0.1:<0.1140.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:38] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 2:59:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.791.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from 
ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 2:59:38] [ns_1@127.0.0.1:<0.1095.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1152.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1168.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:39] [ns_1@127.0.0.1:<0.1097.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:39] [ns_1@127.0.0.1:<0.1138.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:40] [ns_1@127.0.0.1:<0.1155.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:40] [ns_1@127.0.0.1:<0.1105.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:41] [ns_1@127.0.0.1:<0.1107.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:41] [ns_1@127.0.0.1:<0.1153.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1168.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1191.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:42] [ns_1@127.0.0.1:<0.1165.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:42] [ns_1@127.0.0.1:<0.1120.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:43] 
[ns_1@127.0.0.1:<0.1122.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:43] [ns_1@127.0.0.1:<0.1163.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:44] [ns_1@127.0.0.1:<0.1182.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:44] [ns_1@127.0.0.1:<0.1173.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:44] [ns_1@127.0.0.1:<0.1132.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1191.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1207.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:45] [ns_1@127.0.0.1:<0.1136.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:45] [ns_1@127.0.0.1:<0.1180.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:46] [ns_1@127.0.0.1:<0.1194.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:47] [ns_1@127.0.0.1:<0.1145.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:47] [ns_1@127.0.0.1:<0.1149.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:48] [ns_1@127.0.0.1:<0.1192.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1207.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1228.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:48] [ns_1@127.0.0.1:<0.1204.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:49] 
[ns_1@127.0.0.1:<0.1157.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:49] [ns_1@127.0.0.1:<0.1161.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1173.2>} [stats:error] [2012-03-26 2:59:50] [ns_1@127.0.0.1:<0.1202.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:50] [ns_1@127.0.0.1:<0.1219.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:50] [ns_1@127.0.0.1:<0.1173.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:51] [ns_1@127.0.0.1:<0.1169.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1228.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1247.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:51] [ns_1@127.0.0.1:<0.1176.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:51] [ns_1@127.0.0.1:<0.1188.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:51] [ns_1@127.0.0.1:<0.1200.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:51] [ns_1@127.0.0.1:<0.1215.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:51] [ns_1@127.0.0.1:<0.1225.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:52] [ns_1@127.0.0.1:<0.1217.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:52] [ns_1@127.0.0.1:<0.1231.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:53] [ns_1@127.0.0.1:<0.1184.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:53] [ns_1@127.0.0.1:<0.1237.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: 
[{pid,<0.1247.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1272.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:54] [ns_1@127.0.0.1:<0.1229.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:54] [ns_1@127.0.0.1:<0.1244.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:55] [ns_1@127.0.0.1:<0.1196.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:55] [ns_1@127.0.0.1:<0.1261.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:56] [ns_1@127.0.0.1:<0.1242.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:56] [ns_1@127.0.0.1:<0.1265.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:56] [ns_1@127.0.0.1:<0.1173.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 2:59:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1272.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 2:59:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1290.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 2:59:57] [ns_1@127.0.0.1:<0.1210.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:57] [ns_1@127.0.0.1:<0.1273.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:58] [ns_1@127.0.0.1:<0.1263.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:58] [ns_1@127.0.0.1:<0.1277.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:59] [ns_1@127.0.0.1:<0.1221.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 2:59:59] [ns_1@127.0.0.1:<0.1285.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 2:59:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1173.2>} [ns_doctor:info] [2012-03-26 2:59:59] 
[ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,755995,200292}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38509880}, {processes,10664000}, {processes_used,9055736}, {system,27845880}, {atom,1306681}, {atom_used,1284164}, {binary,603392}, {code,12859877}, {ets,2447048}]}, {system_stats, [{cpu_utilization_rate,25.12562814070352}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7016}, {memory_data,{4040077312,4014018560,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25448 kB\nBuffers: 65032 kB\nCached: 3525852 kB\nSwapCached: 0 kB\nActive: 313328 kB\nInactive: 3438316 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25448 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 84 kB\nWriteback: 0 kB\nAnonPages: 160756 kB\nMapped: 24872 kB\nSlab: 134272 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582464 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3610472448}, {buffered_memory,66592768}, {free_memory,26058752}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7011937,0}}, {context_switches,{1915013,0}}, {garbage_collection,{1017761,1455858895,0}}, {io,{{input,29149568},{output,75194644}}}, {reductions,{401686141,614322}}, {run_queue,0}, {runtime,{88890,170}}]}]}] [error_logger:error] [2012-03-26 3:00:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1290.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1325.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:00] [ns_1@127.0.0.1:<0.1275.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:00] 
[ns_1@127.0.0.1:<0.1291.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1088.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1332.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:01] [ns_1@127.0.0.1:<0.1233.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:01] [ns_1@127.0.0.1:<0.1298.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:02] [ns_1@127.0.0.1:<0.1287.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:02] [ns_1@127.0.0.1:<0.1302.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:02] [ns_1@127.0.0.1:<0.1173.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1325.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1344.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:03] [ns_1@127.0.0.1:<0.1248.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:03] [ns_1@127.0.0.1:<0.1328.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:04] [ns_1@127.0.0.1:<0.1300.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:04] [ns_1@127.0.0.1:<0.1333.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:05] [ns_1@127.0.0.1:<0.1250.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:05] [ns_1@127.0.0.1:<0.1339.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1344.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1367.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:06] [ns_1@127.0.0.1:<0.1330.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:06] [ns_1@127.0.0.1:<0.1345.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:07] [ns_1@127.0.0.1:<0.1252.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:07] [ns_1@127.0.0.1:<0.1352.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:08] [ns_1@127.0.0.1:<0.1341.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:08] [ns_1@127.0.0.1:<0.1359.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1367.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1383.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:09] [ns_1@127.0.0.1:<0.1254.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1173.2>} [stats:error] [2012-03-26 3:00:09] [ns_1@127.0.0.1:<0.1368.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:10] [ns_1@127.0.0.1:<0.1354.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:10] [ns_1@127.0.0.1:<0.1372.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:11] [ns_1@127.0.0.1:<0.1256.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:11] [ns_1@127.0.0.1:<0.1378.2>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1383.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1405.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:12] [ns_1@127.0.0.1:<0.1370.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:12] [ns_1@127.0.0.1:<0.1384.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:13] [ns_1@127.0.0.1:<0.1267.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:13] [ns_1@127.0.0.1:<0.1394.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:14] [ns_1@127.0.0.1:<0.1380.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:14] [ns_1@127.0.0.1:<0.1398.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1405.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1421.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:15] [ns_1@127.0.0.1:<0.1281.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:15] [ns_1@127.0.0.1:<0.1406.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:16] [ns_1@127.0.0.1:<0.1396.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:16] [ns_1@127.0.0.1:<0.1410.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:17] [ns_1@127.0.0.1:<0.1319.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:18] [ns_1@127.0.0.1:<0.1416.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
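The menelaus_sup entries above repeat one cycle: the menelaus_web_alerts_srv worker exits, the supervisor logs a SUPERVISOR REPORT naming it as the Offender, and the next PROGRESS REPORT shows a replacement child with a fresh pid. The Offender/started fields correspond to an OTP child specification of roughly the following shape (a sketch for orientation only, not the actual menelaus_sup source; the one_for_one strategy and restart intensity are assumptions not visible in the log):

%% Sketch of a supervisor child spec matching the fields logged above
%% (permanent worker, 5000 ms shutdown, started via start_link/0).
-module(menelaus_sup_sketch).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    AlertsSrv = {menelaus_web_alerts_srv,                   %% {name, ...}
                 {menelaus_web_alerts_srv, start_link, []}, %% {mfargs, ...}
                 permanent,                                 %% {restart_type, ...}
                 5000,                                      %% {shutdown, ...} in ms
                 worker,                                    %% {child_type, ...}
                 [menelaus_web_alerts_srv]},
    %% Restart strategy and intensity are assumptions; the log only shows that
    %% each child_terminated event is followed by an automatic restart.
    {ok, {{one_for_one, 10, 10}, [AlertsSrv]}}.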
[error_logger:error] [2012-03-26 3:00:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1421.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1441.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:18] [ns_1@127.0.0.1:<0.1408.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:19] [ns_1@127.0.0.1:<0.1424.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:19] [ns_1@127.0.0.1:<0.1293.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1173.2>} [stats:error] [2012-03-26 3:00:20] [ns_1@127.0.0.1:<0.1430.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:20] [ns_1@127.0.0.1:<0.1418.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:21] [ns_1@127.0.0.1:<0.1434.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1441.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1460.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:21] [ns_1@127.0.0.1:<0.1335.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:22] [ns_1@127.0.0.1:<0.1442.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:22] [ns_1@127.0.0.1:<0.1432.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:23] [ns_1@127.0.0.1:<0.1446.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:23] [ns_1@127.0.0.1:<0.1347.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:24] [ns_1@127.0.0.1:<0.1455.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1460.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1478.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:24] [ns_1@127.0.0.1:<0.1444.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:25] [ns_1@127.0.0.1:<0.1461.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:25] [ns_1@127.0.0.1:<0.1363.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:26] [ns_1@127.0.0.1:<0.1467.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:26] [ns_1@127.0.0.1:<0.1457.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:27] [ns_1@127.0.0.1:<0.1471.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1478.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1496.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:27] [ns_1@127.0.0.1:<0.1376.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:28] [ns_1@127.0.0.1:<0.1479.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:28] [ns_1@127.0.0.1:<0.1469.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:29] [ns_1@127.0.0.1:<0.1485.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:29] [ns_1@127.0.0.1:<0.1390.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[ns_server:info] [2012-03-26 3:00:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1173.2>} [error_logger:error] [2012-03-26 3:00:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1496.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1512.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:30] [ns_1@127.0.0.1:<0.1491.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:30] [ns_1@127.0.0.1:<0.1481.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:31] [ns_1@127.0.0.1:<0.1497.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:31] [ns_1@127.0.0.1:<0.1402.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:32] [ns_1@127.0.0.1:<0.1503.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:32] [ns_1@127.0.0.1:<0.1493.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1512.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1530.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:33] [ns_1@127.0.0.1:<0.1507.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:33] [ns_1@127.0.0.1:<0.1414.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:34] [ns_1@127.0.0.1:<0.1517.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:34] [ns_1@127.0.0.1:<0.1505.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:35] [ns_1@127.0.0.1:<0.1521.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:35] [ns_1@127.0.0.1:<0.1428.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1332.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1550.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 3:00:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1530.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1552.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:36] [ns_1@127.0.0.1:<0.1527.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:36] [ns_1@127.0.0.1:<0.1519.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:37] [ns_1@127.0.0.1:<0.1533.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:37] [ns_1@127.0.0.1:<0.1438.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:38] [ns_1@127.0.0.1:<0.1539.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:38] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 3:00:38] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1173.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: 
[] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 3:00:38] [ns_1@127.0.0.1:<0.1531.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1552.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1568.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:39] [ns_1@127.0.0.1:<0.1546.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:39] [ns_1@127.0.0.1:<0.1450.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:40] [ns_1@127.0.0.1:<0.1555.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:40] [ns_1@127.0.0.1:<0.1544.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:41] [ns_1@127.0.0.1:<0.1561.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:41] [ns_1@127.0.0.1:<0.1465.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1568.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1592.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:42] [ns_1@127.0.0.1:<0.1565.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:42] [ns_1@127.0.0.1:<0.1557.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:43] [ns_1@127.0.0.1:<0.1576.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:43] [ns_1@127.0.0.1:<0.1475.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 3:00:44] [ns_1@127.0.0.1:<0.1582.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:44] [ns_1@127.0.0.1:<0.1573.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:44] [ns_1@127.0.0.1:<0.1569.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1592.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1608.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:45] [ns_1@127.0.0.1:<0.1589.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:45] [ns_1@127.0.0.1:<0.1489.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:46] [ns_1@127.0.0.1:<0.1595.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:46] [ns_1@127.0.0.1:<0.1585.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:47] [ns_1@127.0.0.1:<0.1601.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:47] [ns_1@127.0.0.1:<0.1501.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1608.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1629.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:48] [ns_1@127.0.0.1:<0.1605.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:49] [ns_1@127.0.0.1:<0.1597.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1573.2>} 
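The two CRASH REPORTs for ns_janitor:cleanup/2 in this window share one failure: while reading the existing vbucket map, the janitor issues a gen_server:call to the locally registered 'ns_memcached-default' process with a 30 000 ms timeout, gets no reply before the deadline, and the call exits with {timeout, ...}; the janitor process dies with that reason and ns_orchestrator then logs "Janitor run exited for bucket \"default\"". A minimal sketch of that call pattern, with illustrative module and function names only (this is not the ns_janitor source):

%% Sketch of the blocking call whose 30 s timeout appears in the crash
%% reports above. Names are illustrative assumptions.
-module(janitor_call_sketch).
-export([list_vbuckets_prevstate/2]).

-define(TIMEOUT, 30000).

list_vbuckets_prevstate(Bucket, Node) ->
    Server = {list_to_atom("ns_memcached-" ++ Bucket), Node},
    %% If no reply arrives within 30 000 ms, gen_server:call/3 exits with
    %% {timeout, {gen_server, call, [Server, list_vbuckets_prevstate, 30000]}},
    %% which is exactly the exit reason shown for ns_janitor:cleanup/2; the
    %% caller does not catch it, so the janitor process crashes.
    gen_server:call(Server, list_vbuckets_prevstate, ?TIMEOUT).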
[stats:error] [2012-03-26 3:00:49] [ns_1@127.0.0.1:<0.1616.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:50] [ns_1@127.0.0.1:<0.1580.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:50] [ns_1@127.0.0.1:<0.1573.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:50] [ns_1@127.0.0.1:<0.1620.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:00:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1629.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1646.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:00:51] [ns_1@127.0.0.1:<0.1611.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:51] [ns_1@127.0.0.1:<0.1626.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:51] [ns_1@127.0.0.1:<0.1513.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:51] [ns_1@127.0.0.1:<0.1553.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:51] [ns_1@127.0.0.1:<0.1563.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:51] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 3:00:51] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 3:00:51] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 3:00:51] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 3:00:51] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 3:00:51] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [user:info] [2012-03-26 3:00:54] [ns_1@127.0.0.1:<0.1675.2>:menelaus_web_alerts_srv:global_alert:64] Approaching full disk warning. Usage of disk "/" on node "127.0.0.1" is around 100%. [ns_server:warn] [2012-03-26 3:00:56] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
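The recurring child_terminated Reason in the menelaus_sup reports is {noproc, ...} from a gen_server:call to {'stats_reader-default', 'ns_1@127.0.0.1'} with request {latest, minute, 1}: the alerts worker polls the per-bucket stats reader, but while the "default" bucket is being shut down and recreated that registered process does not exist, so the call exits with noproc, the worker dies, and the supervisor restarts it a few seconds later, producing the report/restart loop seen above. A sketch of that call, plus a defensive variant that turns the exit into an error tuple (illustrative names only; not the menelaus_web_alerts_srv source):

%% Sketch of the {latest, minute, 1} call that exits with noproc in the
%% supervisor reports above. Names are illustrative assumptions.
-module(stats_call_sketch).
-export([latest_minute/2, latest_minute_safe/2]).

latest_minute(Bucket, Node) ->
    Server = {list_to_atom("stats_reader-" ++ Bucket), Node},
    %% Exits with {noproc, {gen_server, call, [Server, {latest, minute, 1}]}}
    %% when no process is registered under that name on Node.
    gen_server:call(Server, {latest, minute, 1}).

latest_minute_safe(Bucket, Node) ->
    try latest_minute(Bucket, Node)
    catch
        exit:{noproc, _}  -> {error, no_stats_reader};
        exit:{timeout, _} -> {error, timeout}
    end.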
[ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:<0.1641.2>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:<0.1636.2>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 3:00:56] [ns_1@127.0.0.1:<0.1652.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 3:00:56] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [error_logger:error] [2012-03-26 3:00:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1646.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:00:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1700.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 3:00:57] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 03:00:58: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", 
moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 3:00:57] [ns_1@127.0.0.1:<0.1654.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:58] [ns_1@127.0.0.1:<0.1593.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:58] [ns_1@127.0.0.1:<0.1630.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:59] [ns_1@127.0.0.1:<0.1701.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:00:59] [ns_1@127.0.0.1:<0.1656.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:00:59] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1689.2>} [ns_doctor:info] [2012-03-26 3:00:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,756055,231355}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38344280}, {processes,10455448}, {processes_used,8848944}, {system,27888832}, {atom,1306681}, {atom_used,1284164}, {binary,607320}, {code,12859877}, {ets,2478728}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7076}, {memory_data,{4040077312,4014018560,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 25200 kB\nBuffers: 65148 kB\nCached: 3525976 kB\nSwapCached: 0 kB\nActive: 313392 kB\nInactive: 3438480 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 25200 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 80 kB\nWriteback: 0 kB\nAnonPages: 160756 kB\nMapped: 24872 kB\nSlab: 134268 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582464 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, 
{system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3610599424}, {buffered_memory,66711552}, {free_memory,25804800}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7071968,0}}, {context_switches,{1927615,0}}, {garbage_collection,{1024330,1466734368,0}}, {io,{{input,29414524},{output,75862126}}}, {reductions,{404160432,612448}}, {run_queue,0}, {runtime,{89550,150}}]}]}] [stats:error] [2012-03-26 3:01:00] [ns_1@127.0.0.1:<0.1603.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1700.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1719.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:00] [ns_1@127.0.0.1:<0.1643.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:01] [ns_1@127.0.0.1:<0.1711.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:01] [ns_1@127.0.0.1:<0.1659.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:01] [ns_1@127.0.0.1:<0.1689.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:02] [ns_1@127.0.0.1:<0.1618.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:02] [ns_1@127.0.0.1:<0.1682.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:03] [ns_1@127.0.0.1:<0.1726.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1719.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1738.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:03] 
[ns_1@127.0.0.1:<0.1662.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:04] [ns_1@127.0.0.1:<0.1707.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:04] [ns_1@127.0.0.1:<0.1683.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:05] [ns_1@127.0.0.1:<0.1739.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:05] [ns_1@127.0.0.1:<0.1664.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:06] [ns_1@127.0.0.1:<0.1722.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1738.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1759.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:06] [ns_1@127.0.0.1:<0.1684.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:07] [ns_1@127.0.0.1:<0.1752.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:07] [ns_1@127.0.0.1:<0.1666.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:07] [ns_1@127.0.0.1:<0.1689.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:08] [ns_1@127.0.0.1:<0.1732.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:08] [ns_1@127.0.0.1:<0.1685.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1759.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1774.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:09] 
[ns_1@127.0.0.1:<0.1764.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:09] [ns_1@127.0.0.1:<0.1705.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1689.2>} [stats:error] [2012-03-26 3:01:10] [ns_1@127.0.0.1:<0.1745.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:10] [ns_1@127.0.0.1:<0.1693.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1550.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1789.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:11] [ns_1@127.0.0.1:<0.1777.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:11] [ns_1@127.0.0.1:<0.1716.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1774.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1797.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:12] [ns_1@127.0.0.1:<0.1783.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:12] [ns_1@127.0.0.1:<0.1709.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:13] [ns_1@127.0.0.1:<0.1792.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:13] [ns_1@127.0.0.1:<0.1730.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:13] [ns_1@127.0.0.1:<0.1689.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:14] 
[ns_1@127.0.0.1:<0.1760.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:14] [ns_1@127.0.0.1:<0.1724.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1797.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1814.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:15] [ns_1@127.0.0.1:<0.1804.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:15] [ns_1@127.0.0.1:<0.1743.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:16] [ns_1@127.0.0.1:<0.1770.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:16] [ns_1@127.0.0.1:<0.1735.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:17] [ns_1@127.0.0.1:<0.1821.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:17] [ns_1@127.0.0.1:<0.1756.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1814.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1835.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:18] [ns_1@127.0.0.1:<0.1787.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:18] [ns_1@127.0.0.1:<0.1747.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:19] [ns_1@127.0.0.1:<0.1832.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1689.2>} [stats:error] [2012-03-26 
3:01:19] [ns_1@127.0.0.1:<0.1768.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:19] [ns_1@127.0.0.1:<0.1689.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:20] [ns_1@127.0.0.1:<0.1800.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:20] [ns_1@127.0.0.1:<0.1762.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1835.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1855.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:21] [ns_1@127.0.0.1:<0.1844.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:21] [ns_1@127.0.0.1:<0.1798.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:22] [ns_1@127.0.0.1:<0.1810.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:22] [ns_1@127.0.0.1:<0.1775.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:23] [ns_1@127.0.0.1:<0.1860.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:23] [ns_1@127.0.0.1:<0.1808.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1855.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1873.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:24] [ns_1@127.0.0.1:<0.1826.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:24] [ns_1@127.0.0.1:<0.1790.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:25] 
[ns_1@127.0.0.1:<0.1870.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:25] [ns_1@127.0.0.1:<0.1824.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:26] [ns_1@127.0.0.1:<0.1838.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:26] [ns_1@127.0.0.1:<0.1802.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1873.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1891.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:27] [ns_1@127.0.0.1:<0.1884.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:27] [ns_1@127.0.0.1:<0.1836.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:28] [ns_1@127.0.0.1:<0.1851.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:28] [ns_1@127.0.0.1:<0.1817.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:29] [ns_1@127.0.0.1:<0.1896.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1689.2>} [stats:error] [2012-03-26 3:01:30] [ns_1@127.0.0.1:<0.1849.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1891.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1909.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:30] [ns_1@127.0.0.1:<0.1864.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
3:01:31] [ns_1@127.0.0.1:<0.1828.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:31] [ns_1@127.0.0.1:<0.1906.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:32] [ns_1@127.0.0.1:<0.1862.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:32] [ns_1@127.0.0.1:<0.1876.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:33] [ns_1@127.0.0.1:<0.1840.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1909.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1927.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:33] [ns_1@127.0.0.1:<0.1920.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:34] [ns_1@127.0.0.1:<0.1874.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:34] [ns_1@127.0.0.1:<0.1888.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:35] [ns_1@127.0.0.1:<0.1856.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:35] [ns_1@127.0.0.1:<0.1932.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:36] [ns_1@127.0.0.1:<0.1886.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1927.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1948.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:36] [ns_1@127.0.0.1:<0.1900.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:37] 
[ns_1@127.0.0.1:<0.1866.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:37] [ns_1@127.0.0.1:<0.1945.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:38] [ns_1@127.0.0.1:<0.1898.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:38] [ns_1@127.0.0.1:<0.1914.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:39] [ns_1@127.0.0.1:<0.1880.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1948.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1964.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:39] [ns_1@127.0.0.1:<0.1957.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1689.2>} [stats:error] [2012-03-26 3:01:40] [ns_1@127.0.0.1:<0.1912.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:40] [ns_1@127.0.0.1:<0.1924.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:41] [ns_1@127.0.0.1:<0.1892.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:41] [ns_1@127.0.0.1:<0.1969.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1964.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1982.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:42] [ns_1@127.0.0.1:<0.1922.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
3:01:42] [ns_1@127.0.0.1:<0.1936.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:43] [ns_1@127.0.0.1:<0.1902.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:43] [ns_1@127.0.0.1:<0.1983.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1982.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.1994.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:45] [ns_1@127.0.0.1:<0.1934.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.1789.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:46] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2001.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:46] [ns_1@127.0.0.1:<0.1949.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:46] [ns_1@127.0.0.1:<0.1951.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:47] [ns_1@127.0.0.1:<0.1916.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:47] [ns_1@127.0.0.1:<0.2002.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.1994.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,menelaus_sup} started: [{pid,<0.2013.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:48] [ns_1@127.0.0.1:<0.1959.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:48] [ns_1@127.0.0.1:<0.1961.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:49] [ns_1@127.0.0.1:<0.1928.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.1689.2>} [stats:error] [2012-03-26 3:01:49] [ns_1@127.0.0.1:<0.2014.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:50] [ns_1@127.0.0.1:<0.1973.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:50] [ns_1@127.0.0.1:<0.1975.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2013.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2033.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:51] [ns_1@127.0.0.1:<0.1941.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:51] [ns_1@127.0.0.1:<0.2027.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:52] [ns_1@127.0.0.1:<0.1985.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:52] [ns_1@127.0.0.1:<0.1987.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:53] [ns_1@127.0.0.1:<0.1953.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:53] [ns_1@127.0.0.1:<0.2040.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2033.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] 
[2012-03-26 3:01:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2051.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:54] [ns_1@127.0.0.1:<0.2004.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:54] [ns_1@127.0.0.1:<0.2006.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:55] [ns_1@127.0.0.1:<0.1965.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:55] [ns_1@127.0.0.1:<0.2052.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:01:55] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 3:01:55] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.1689.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 3:01:56] [ns_1@127.0.0.1:<0.2016.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:01:56] [ns_1@127.0.0.1:<0.2018.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:01:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2051.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:01:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2069.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:01:57] [ns_1@127.0.0.1:<0.1977.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 3:01:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,756115,272417}}, 
{outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38553080}, {processes,10658272}, {processes_used,9048928}, {system,27894808}, {atom,1306681}, {atom_used,1284164}, {binary,635080}, {code,12859877}, {ets,2449944}]}, {system_stats, [{cpu_utilization_rate,25.621890547263682}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7137}, {memory_data,{4040077312,4014534656,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27148 kB\nBuffers: 65236 kB\nCached: 3523052 kB\nSwapCached: 0 kB\nActive: 313448 kB\nInactive: 3435584 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27148 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 28 kB\nWriteback: 0 kB\nAnonPages: 160772 kB\nMapped: 24872 kB\nSlab: 134312 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 581984 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3607605248}, {buffered_memory,66801664}, {free_memory,27799552}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7132008,1}}, {context_switches,{1940013,0}}, {garbage_collection,{1030724,1477655141,0}}, {io,{{input,29431545},{output,76277382}}}, {reductions,{406569892,545208}}, {run_queue,0}, {runtime,{90300,170}}]}]}] [error_logger:error] [2012-03-26 3:02:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2069.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2081.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 3:02:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} 
Offender: [{pid,<0.2081.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2087.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 3:02:04] [ns_1@127.0.0.1:<0.2077.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2087.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2097.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:07] [ns_1@127.0.0.1:<0.1989.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:07] [ns_1@127.0.0.1:<0.2064.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:08] [ns_1@127.0.0.1:<0.2029.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:08] [ns_1@127.0.0.1:<0.2034.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2097.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2109.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:09] [ns_1@127.0.0.1:<0.1997.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2077.2>} [stats:error] [2012-03-26 3:02:09] [ns_1@127.0.0.1:<0.2104.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 3:02:10] [ns_1@127.0.0.1:<0.2042.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:10] [ns_1@127.0.0.1:<0.2077.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:10] [ns_1@127.0.0.1:<0.2044.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:11] [ns_1@127.0.0.1:<0.2008.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:12] [ns_1@127.0.0.1:<0.2120.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2109.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2132.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:12] [ns_1@127.0.0.1:<0.2054.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:13] [ns_1@127.0.0.1:<0.2058.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:13] [ns_1@127.0.0.1:<0.2022.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:14] [ns_1@127.0.0.1:<0.2133.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:14] [ns_1@127.0.0.1:<0.2066.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:15] [ns_1@127.0.0.1:<0.2070.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2132.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2148.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:15] [ns_1@127.0.0.1:<0.2038.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:16] 
[ns_1@127.0.0.1:<0.2143.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:16] [ns_1@127.0.0.1:<0.2106.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:16] [ns_1@127.0.0.1:<0.2077.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:17] [ns_1@127.0.0.1:<0.2098.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:17] [ns_1@127.0.0.1:<0.2048.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:18] [ns_1@127.0.0.1:<0.2157.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2148.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2169.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 3:02:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2077.2>} [stats:error] [2012-03-26 3:02:18] [ns_1@127.0.0.1:<0.2122.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:20] [ns_1@127.0.0.1:<0.2170.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:20] [ns_1@127.0.0.1:<0.2135.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2001.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2181.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 3:02:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2169.2>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2183.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:21] [ns_1@127.0.0.1:<0.2124.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:22] [ns_1@127.0.0.1:<0.2129.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:22] [ns_1@127.0.0.1:<0.2179.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:22] [ns_1@127.0.0.1:<0.2077.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:23] [ns_1@127.0.0.1:<0.2145.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:23] [ns_1@127.0.0.1:<0.2137.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:24] [ns_1@127.0.0.1:<0.2141.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2183.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2202.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:24] [ns_1@127.0.0.1:<0.2192.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:25] [ns_1@127.0.0.1:<0.2159.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:25] [ns_1@127.0.0.1:<0.2161.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:26] [ns_1@127.0.0.1:<0.2166.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:26] [ns_1@127.0.0.1:<0.2205.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:27] [ns_1@127.0.0.1:<0.2110.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, 
{gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2202.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2221.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:27] [ns_1@127.0.0.1:<0.2062.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:28] [ns_1@127.0.0.1:<0.2177.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:28] [ns_1@127.0.0.1:<0.2218.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:29] [ns_1@127.0.0.1:<0.2184.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:29] [ns_1@127.0.0.1:<0.2102.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2077.2>} [stats:error] [2012-03-26 3:02:30] [ns_1@127.0.0.1:<0.2190.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2221.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2239.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:30] [ns_1@127.0.0.1:<0.2230.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:31] [ns_1@127.0.0.1:<0.2194.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:31] [ns_1@127.0.0.1:<0.2116.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:32] [ns_1@127.0.0.1:<0.2203.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:32] [ns_1@127.0.0.1:<0.2244.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:33] [ns_1@127.0.0.1:<0.2209.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:33] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2239.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2257.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:33] [ns_1@127.0.0.1:<0.2188.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:34] [ns_1@127.0.0.1:<0.2216.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:34] [ns_1@127.0.0.1:<0.2254.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:35] [ns_1@127.0.0.1:<0.2151.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:35] [ns_1@127.0.0.1:<0.2199.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2257.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2276.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:36] [ns_1@127.0.0.1:<0.2228.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:36] [ns_1@127.0.0.1:<0.2266.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:37] [ns_1@127.0.0.1:<0.2222.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:37] [ns_1@127.0.0.1:<0.2155.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:38] [ns_1@127.0.0.1:<0.2242.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:38] [ns_1@127.0.0.1:<0.2281.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: 
child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2276.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2292.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:39] [ns_1@127.0.0.1:<0.2232.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2077.2>} [stats:error] [2012-03-26 3:02:39] [ns_1@127.0.0.1:<0.2213.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:40] [ns_1@127.0.0.1:<0.2252.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:40] [ns_1@127.0.0.1:<0.2293.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:41] [ns_1@127.0.0.1:<0.2246.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:41] [ns_1@127.0.0.1:<0.2226.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2292.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2312.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:42] [ns_1@127.0.0.1:<0.2264.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:42] [ns_1@127.0.0.1:<0.2305.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:43] [ns_1@127.0.0.1:<0.2258.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:43] [ns_1@127.0.0.1:<0.2236.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:44] [ns_1@127.0.0.1:<0.2279.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:44] [ns_1@127.0.0.1:<0.2317.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:45] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2312.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2328.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:45] [ns_1@127.0.0.1:<0.2271.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:45] [ns_1@127.0.0.1:<0.2250.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:46] [ns_1@127.0.0.1:<0.2289.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:46] [ns_1@127.0.0.1:<0.2331.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:47] [ns_1@127.0.0.1:<0.2283.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:47] [ns_1@127.0.0.1:<0.2262.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2328.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2348.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:48] [ns_1@127.0.0.1:<0.2303.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:48] [ns_1@127.0.0.1:<0.2341.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:49] [ns_1@127.0.0.1:<0.2297.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2077.2>} [stats:error] [2012-03-26 3:02:49] [ns_1@127.0.0.1:<0.2277.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:50] [ns_1@127.0.0.1:<0.2315.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:50] 
[ns_1@127.0.0.1:<0.2353.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2348.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2367.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:51] [ns_1@127.0.0.1:<0.2309.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:51] [ns_1@127.0.0.1:<0.2287.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:52] [ns_1@127.0.0.1:<0.2325.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:52] [ns_1@127.0.0.1:<0.2368.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:53] [ns_1@127.0.0.1:<0.2321.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:54] [ns_1@127.0.0.1:<0.2299.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2367.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2385.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:54] [ns_1@127.0.0.1:<0.2339.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:55] [ns_1@127.0.0.1:<0.2378.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:55] [ns_1@127.0.0.1:<0.2335.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:56] [ns_1@127.0.0.1:<0.2313.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} 
Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2181.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:56] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2398.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:56] [ns_1@127.0.0.1:<0.2351.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:57] [ns_1@127.0.0.1:<0.2392.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:02:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2385.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:02:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2404.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:02:57] [ns_1@127.0.0.1:<0.2345.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:58] [ns_1@127.0.0.1:<0.2323.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:58] [ns_1@127.0.0.1:<0.2364.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:02:58] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 3:02:58] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2077.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 3:02:59] [ns_1@127.0.0.1:<0.2405.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:02:59] 
[ns_1@127.0.0.1:<0.2357.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 3:02:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,756175,304527}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38517104}, {processes,10588360}, {processes_used,8979016}, {system,27928744}, {atom,1306681}, {atom_used,1284164}, {binary,633160}, {code,12859877}, {ets,2478792}]}, {system_stats, [{cpu_utilization_rate,25.376884422110553}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7197}, {memory_data,{4040077312,4012404736,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27024 kB\nBuffers: 65296 kB\nCached: 3523224 kB\nSwapCached: 0 kB\nActive: 313616 kB\nInactive: 3435648 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27024 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 20 kB\nWriteback: 0 kB\nAnonPages: 160772 kB\nMapped: 24872 kB\nSlab: 134324 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 581984 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3607781376}, {buffered_memory,66863104}, {free_memory,27672576}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7192040,1}}, {context_switches,{1951197,0}}, {garbage_collection,{1036768,1486679100,0}}, {io,{{input,29441488},{output,76603970}}}, {reductions,{408717474,585703}}, {run_queue,0}, {runtime,{91020,190}}]}]}] [stats:error] [2012-03-26 3:03:00] [ns_1@127.0.0.1:<0.2337.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2404.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2431.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:00] [ns_1@127.0.0.1:<0.2376.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:01] [ns_1@127.0.0.1:<0.2415.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:01] [ns_1@127.0.0.1:<0.2372.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:02] [ns_1@127.0.0.1:<0.2349.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:02] [ns_1@127.0.0.1:<0.2388.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:03] [ns_1@127.0.0.1:<0.2447.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2431.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2458.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:03] [ns_1@127.0.0.1:<0.2382.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:04] [ns_1@127.0.0.1:<0.2362.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:04] [ns_1@127.0.0.1:<0.2401.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:04] [ns_1@127.0.0.1:<0.2425.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:05] [ns_1@127.0.0.1:<0.2459.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:05] [ns_1@127.0.0.1:<0.2396.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2458.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2478.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:06] [ns_1@127.0.0.1:<0.2374.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:06] [ns_1@127.0.0.1:<0.2413.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:07] [ns_1@127.0.0.1:<0.2472.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:07] [ns_1@127.0.0.1:<0.2409.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:07] [ns_1@127.0.0.1:<0.2428.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:07] [ns_1@127.0.0.1:<0.2451.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:08] [ns_1@127.0.0.1:<0.2386.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:08] [ns_1@127.0.0.1:<0.2437.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2478.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2498.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:09] [ns_1@127.0.0.1:<0.2485.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:09] [ns_1@127.0.0.1:<0.2463.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2425.2>} [stats:error] [2012-03-26 3:03:10] [ns_1@127.0.0.1:<0.2399.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:10] [ns_1@127.0.0.1:<0.2425.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:10] [ns_1@127.0.0.1:<0.2455.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:11] [ns_1@127.0.0.1:<0.2487.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:11] [ns_1@127.0.0.1:<0.2479.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: 
{local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2498.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2521.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:12] [ns_1@127.0.0.1:<0.2411.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:12] [ns_1@127.0.0.1:<0.2467.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:13] [ns_1@127.0.0.1:<0.2489.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:13] [ns_1@127.0.0.1:<0.2493.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:14] [ns_1@127.0.0.1:<0.2434.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:14] [ns_1@127.0.0.1:<0.2483.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2521.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2537.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:15] [ns_1@127.0.0.1:<0.2503.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:15] [ns_1@127.0.0.1:<0.2507.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:16] [ns_1@127.0.0.1:<0.2453.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:16] [ns_1@127.0.0.1:<0.2425.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:16] [ns_1@127.0.0.1:<0.2499.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:17] [ns_1@127.0.0.1:<0.2518.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:17] [ns_1@127.0.0.1:<0.2522.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:18] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2537.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2558.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:18] [ns_1@127.0.0.1:<0.2465.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:18] [ns_1@127.0.0.1:<0.2513.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:19] [ns_1@127.0.0.1:<0.2530.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2425.2>} [stats:error] [2012-03-26 3:03:19] [ns_1@127.0.0.1:<0.2532.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:20] [ns_1@127.0.0.1:<0.2481.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:20] [ns_1@127.0.0.1:<0.2526.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2558.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2577.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:21] [ns_1@127.0.0.1:<0.2544.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:21] [ns_1@127.0.0.1:<0.2546.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:22] [ns_1@127.0.0.1:<0.2495.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:22] [ns_1@127.0.0.1:<0.2425.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:22] [ns_1@127.0.0.1:<0.2540.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:23] 
[ns_1@127.0.0.1:<0.2555.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:23] [ns_1@127.0.0.1:<0.2559.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2577.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2596.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:24] [ns_1@127.0.0.1:<0.2511.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:24] [ns_1@127.0.0.1:<0.2550.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:26] [ns_1@127.0.0.1:<0.2572.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:26] [ns_1@127.0.0.1:<0.2524.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:27] [ns_1@127.0.0.1:<0.2563.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2596.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2612.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:27] [ns_1@127.0.0.1:<0.2567.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:28] [ns_1@127.0.0.1:<0.2584.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:28] [ns_1@127.0.0.1:<0.2534.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:29] [ns_1@127.0.0.1:<0.2578.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:29] [ns_1@127.0.0.1:<0.2582.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:29] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2425.2>} [stats:error] [2012-03-26 3:03:30] [ns_1@127.0.0.1:<0.2597.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2612.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2630.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:30] [ns_1@127.0.0.1:<0.2548.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2398.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:31] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2635.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:31] [ns_1@127.0.0.1:<0.2588.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:31] [ns_1@127.0.0.1:<0.2593.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:32] [ns_1@127.0.0.1:<0.2607.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:32] [ns_1@127.0.0.1:<0.2561.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:33] [ns_1@127.0.0.1:<0.2613.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2630.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2649.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:33] [ns_1@127.0.0.1:<0.2605.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:34] [ns_1@127.0.0.1:<0.2619.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:34] [ns_1@127.0.0.1:<0.2574.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:35] [ns_1@127.0.0.1:<0.2623.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:35] [ns_1@127.0.0.1:<0.2617.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:36] [ns_1@127.0.0.1:<0.2633.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2649.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2671.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:36] [ns_1@127.0.0.1:<0.2586.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:37] [ns_1@127.0.0.1:<0.2638.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:37] [ns_1@127.0.0.1:<0.2627.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:38] [ns_1@127.0.0.1:<0.2644.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:38] [ns_1@127.0.0.1:<0.2599.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2671.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2685.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:39] [ns_1@127.0.0.1:<0.2650.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:39] [ns_1@127.0.0.1:<0.2642.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2425.2>} [stats:error] [2012-03-26 3:03:40] [ns_1@127.0.0.1:<0.2656.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:40] [ns_1@127.0.0.1:<0.2609.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:41] [ns_1@127.0.0.1:<0.2663.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:41] [ns_1@127.0.0.1:<0.2654.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2685.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2705.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:42] [ns_1@127.0.0.1:<0.2672.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:42] [ns_1@127.0.0.1:<0.2621.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:43] [ns_1@127.0.0.1:<0.2676.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:43] [ns_1@127.0.0.1:<0.2668.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:44] [ns_1@127.0.0.1:<0.2682.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:44] [ns_1@127.0.0.1:<0.2636.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2705.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2721.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:45] [ns_1@127.0.0.1:<0.2688.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:45] [ns_1@127.0.0.1:<0.2680.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:46] [ns_1@127.0.0.1:<0.2696.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:46] [ns_1@127.0.0.1:<0.2646.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:47] [ns_1@127.0.0.1:<0.2700.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:47] [ns_1@127.0.0.1:<0.2692.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2721.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-03-26 3:03:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2425.2>} [error_logger:info] [2012-03-26 3:03:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2741.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 3:03:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2741.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2748.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:51] [ns_1@127.0.0.1:<0.2706.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:52] [ns_1@127.0.0.1:<0.2708.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:53] [ns_1@127.0.0.1:<0.2658.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:53] 
[ns_1@127.0.0.1:<0.2714.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:54] [ns_1@127.0.0.1:<0.2716.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2748.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2764.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:54] [ns_1@127.0.0.1:<0.2718.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:55] [ns_1@127.0.0.1:<0.2674.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:55] [ns_1@127.0.0.1:<0.2728.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:56] [ns_1@127.0.0.1:<0.2730.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:56] [ns_1@127.0.0.1:<0.2732.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:57] [ns_1@127.0.0.1:<0.2686.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:03:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2764.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:03:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2782.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:03:57] [ns_1@127.0.0.1:<0.2738.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:58] [ns_1@127.0.0.1:<0.2753.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:58] [ns_1@127.0.0.1:<0.2755.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:03:58] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 
'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 3:03:58] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2425.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 3:03:59] [ns_1@127.0.0.1:<0.2698.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:03:59] [ns_1@127.0.0.1:<0.2751.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 3:03:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,756235,334604}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38482736}, {processes,10571432}, {processes_used,8962088}, {system,27911304}, {atom,1306681}, {atom_used,1284164}, {binary,635048}, {code,12859877}, {ets,2452648}]}, {system_stats, [{cpu_utilization_rate,25.43640897755611}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7257}, {memory_data,{4040077312,4012531712,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27396 kB\nBuffers: 65376 kB\nCached: 3522904 kB\nSwapCached: 0 kB\nActive: 313536 kB\nInactive: 3435512 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27396 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 20 kB\nWriteback: 8 kB\nAnonPages: 160776 kB\nMapped: 24872 kB\nSlab: 134308 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582296 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3607453696}, {buffered_memory,66945024}, {free_memory,28053504}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7252071,1}}, {context_switches,{1963844,0}}, {garbage_collection,{1043687,1497294163,0}}, 
{io,{{input,29696746},{output,77481577}}}, {reductions,{411497412,540875}}, {run_queue,0}, {runtime,{91830,150}}]}]}] [stats:error] [2012-03-26 3:04:00] [ns_1@127.0.0.1:<0.2765.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2782.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2804.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:00] [ns_1@127.0.0.1:<0.2767.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:01] [ns_1@127.0.0.1:<0.2710.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:01] [ns_1@127.0.0.1:<0.2761.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:02] [ns_1@127.0.0.1:<0.2777.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:02] [ns_1@127.0.0.1:<0.2779.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:03] [ns_1@127.0.0.1:<0.2722.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2804.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2822.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:03] [ns_1@127.0.0.1:<0.2775.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:04] [ns_1@127.0.0.1:<0.2789.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:04] [ns_1@127.0.0.1:<0.2791.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:04] [ns_1@127.0.0.1:<0.2798.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:05] 
[ns_1@127.0.0.1:<0.2734.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:05] [ns_1@127.0.0.1:<0.2787.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2635.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2841.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:06] [ns_1@127.0.0.1:<0.2807.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2822.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2845.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:06] [ns_1@127.0.0.1:<0.2809.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:07] [ns_1@127.0.0.1:<0.2757.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:07] [ns_1@127.0.0.1:<0.2801.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:08] [ns_1@127.0.0.1:<0.2817.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:08] [ns_1@127.0.0.1:<0.2829.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:08] [ns_1@127.0.0.1:<0.2846.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:08] [ns_1@127.0.0.1:<0.2819.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2845.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2863.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:09] [ns_1@127.0.0.1:<0.2771.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:09] [ns_1@127.0.0.1:<0.2815.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2798.2>} [stats:error] [2012-03-26 3:04:10] [ns_1@127.0.0.1:<0.2860.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:10] [ns_1@127.0.0.1:<0.2798.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:10] [ns_1@127.0.0.1:<0.2831.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:11] [ns_1@127.0.0.1:<0.2783.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:11] [ns_1@127.0.0.1:<0.2827.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2863.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2887.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:12] [ns_1@127.0.0.1:<0.2876.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:12] [ns_1@127.0.0.1:<0.2848.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:13] [ns_1@127.0.0.1:<0.2793.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:13] [ns_1@127.0.0.1:<0.2842.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2887.2>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2899.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:15] [ns_1@127.0.0.1:<0.2811.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:16] [ns_1@127.0.0.1:<0.2854.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:16] [ns_1@127.0.0.1:<0.2890.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:16] [ns_1@127.0.0.1:<0.2798.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:17] [ns_1@127.0.0.1:<0.2864.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:17] [ns_1@127.0.0.1:<0.2823.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:18] [ns_1@127.0.0.1:<0.2856.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2899.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2920.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:18] [ns_1@127.0.0.1:<0.2910.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:19] [ns_1@127.0.0.1:<0.2879.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:19] [ns_1@127.0.0.1:<0.2836.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2798.2>} [stats:error] [2012-03-26 3:04:20] [ns_1@127.0.0.1:<0.2858.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:20] [ns_1@127.0.0.1:<0.2923.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:21] [ns_1@127.0.0.1:<0.2892.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:21] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2920.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2939.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:21] [ns_1@127.0.0.1:<0.2850.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:22] [ns_1@127.0.0.1:<0.2872.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:22] [ns_1@127.0.0.1:<0.2936.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:22] [ns_1@127.0.0.1:<0.2798.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:23] [ns_1@127.0.0.1:<0.2902.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:23] [ns_1@127.0.0.1:<0.2868.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:24] [ns_1@127.0.0.1:<0.2888.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2939.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2958.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:24] [ns_1@127.0.0.1:<0.2948.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:25] [ns_1@127.0.0.1:<0.2912.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:25] [ns_1@127.0.0.1:<0.2884.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:26] [ns_1@127.0.0.1:<0.2908.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:26] [ns_1@127.0.0.1:<0.2961.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:27] 
[ns_1@127.0.0.1:<0.2925.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2958.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2976.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:27] [ns_1@127.0.0.1:<0.2896.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:28] [ns_1@127.0.0.1:<0.2921.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:28] [ns_1@127.0.0.1:<0.2973.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:29] [ns_1@127.0.0.1:<0.2940.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:29] [ns_1@127.0.0.1:<0.2906.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2798.2>} [error_logger:error] [2012-03-26 3:04:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2976.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.2992.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:30] [ns_1@127.0.0.1:<0.2934.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:30] [ns_1@127.0.0.1:<0.2985.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:31] [ns_1@127.0.0.1:<0.2950.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:31] [ns_1@127.0.0.1:<0.2917.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:32] [ns_1@127.0.0.1:<0.2946.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
3:04:32] [ns_1@127.0.0.1:<0.2999.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.2992.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3010.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:33] [ns_1@127.0.0.1:<0.2965.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:33] [ns_1@127.0.0.1:<0.2929.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:34] [ns_1@127.0.0.1:<0.2959.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:34] [ns_1@127.0.0.1:<0.3011.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:35] [ns_1@127.0.0.1:<0.2977.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:35] [ns_1@127.0.0.1:<0.2944.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3010.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3031.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:36] [ns_1@127.0.0.1:<0.2971.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:36] [ns_1@127.0.0.1:<0.3021.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:37] [ns_1@127.0.0.1:<0.2987.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:37] [ns_1@127.0.0.1:<0.2955.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:38] [ns_1@127.0.0.1:<0.2983.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:38] 
[ns_1@127.0.0.1:<0.3036.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3031.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3047.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:39] [ns_1@127.0.0.1:<0.3001.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2798.2>} [stats:error] [2012-03-26 3:04:39] [ns_1@127.0.0.1:<0.2969.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:40] [ns_1@127.0.0.1:<0.2997.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:40] [ns_1@127.0.0.1:<0.3048.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.2841.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:41] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3060.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:41] [ns_1@127.0.0.1:<0.3015.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:41] [ns_1@127.0.0.1:<0.2981.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3047.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3068.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:42] [ns_1@127.0.0.1:<0.3007.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:42] [ns_1@127.0.0.1:<0.3061.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:43] [ns_1@127.0.0.1:<0.3028.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:43] [ns_1@127.0.0.1:<0.2993.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:44] [ns_1@127.0.0.1:<0.3019.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:44] [ns_1@127.0.0.1:<0.3073.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3068.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3084.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:45] [ns_1@127.0.0.1:<0.3040.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:45] [ns_1@127.0.0.1:<0.3005.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:46] [ns_1@127.0.0.1:<0.3034.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:47] [ns_1@127.0.0.1:<0.3087.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:47] [ns_1@127.0.0.1:<0.3052.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:48] [ns_1@127.0.0.1:<0.3017.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3084.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= 
supervisor: {local,menelaus_sup} started: [{pid,<0.3105.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:48] [ns_1@127.0.0.1:<0.3044.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:49] [ns_1@127.0.0.1:<0.3098.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:49] [ns_1@127.0.0.1:<0.3065.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.2798.2>} [stats:error] [2012-03-26 3:04:50] [ns_1@127.0.0.1:<0.3032.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:50] [ns_1@127.0.0.1:<0.3058.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:51] [ns_1@127.0.0.1:<0.3110.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3105.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3124.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:51] [ns_1@127.0.0.1:<0.3077.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:52] [ns_1@127.0.0.1:<0.3042.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:52] [ns_1@127.0.0.1:<0.3071.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:53] [ns_1@127.0.0.1:<0.3125.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:53] [ns_1@127.0.0.1:<0.3091.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:54] [ns_1@127.0.0.1:<0.3056.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3124.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] 
[2012-03-26 3:04:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3142.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:54] [ns_1@127.0.0.1:<0.3081.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:55] [ns_1@127.0.0.1:<0.3135.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:55] [ns_1@127.0.0.1:<0.3102.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:56] [ns_1@127.0.0.1:<0.3069.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:56] [ns_1@127.0.0.1:<0.3096.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:57] [ns_1@127.0.0.1:<0.3149.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:04:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3142.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:04:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3160.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:04:57] [ns_1@127.0.0.1:<0.3114.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:58] [ns_1@127.0.0.1:<0.3079.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:58] [ns_1@127.0.0.1:<0.3108.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:04:58] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 3:04:58] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.2798.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: 
[{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 3:04:59] [ns_1@127.0.0.1:<0.3161.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:04:59] [ns_1@127.0.0.1:<0.3129.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 3:04:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,756295,365390}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38439928}, {processes,10492432}, {processes_used,8883088}, {system,27947496}, {atom,1306681}, {atom_used,1284164}, {binary,635576}, {code,12859877}, {ets,2481256}]}, {system_stats, [{cpu_utilization_rate,25.31328320802005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7317}, {memory_data,{4040077312,4012277760,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27148 kB\nBuffers: 65436 kB\nCached: 3523072 kB\nSwapCached: 0 kB\nActive: 313624 kB\nInactive: 3435644 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27148 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 24 kB\nWriteback: 0 kB\nAnonPages: 160776 kB\nMapped: 24872 kB\nSlab: 134316 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582296 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3607625728}, {buffered_memory,67006464}, {free_memory,27799552}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7312101,0}}, {context_switches,{1975853,0}}, {garbage_collection,{1049910,1507572670,0}}, {io,{{input,29706734},{output,77848867}}}, {reductions,{413831698,593665}}, {run_queue,0}, {runtime,{92570,190}}]}]}] [stats:error] [2012-03-26 3:05:00] [ns_1@127.0.0.1:<0.3094.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3160.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [error_logger:info] [2012-03-26 3:05:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3182.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:00] [ns_1@127.0.0.1:<0.3121.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:01] [ns_1@127.0.0.1:<0.3171.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:01] [ns_1@127.0.0.1:<0.3139.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:02] [ns_1@127.0.0.1:<0.3106.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:02] [ns_1@127.0.0.1:<0.3133.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3182.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3198.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:03] [ns_1@127.0.0.1:<0.3189.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:03] [ns_1@127.0.0.1:<0.3153.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:04] [ns_1@127.0.0.1:<0.3119.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:04] [ns_1@127.0.0.1:<0.3176.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:04] [ns_1@127.0.0.1:<0.3145.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:05] [ns_1@127.0.0.1:<0.3201.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:05] [ns_1@127.0.0.1:<0.3165.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3198.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] 
[error_logger:info] [2012-03-26 3:05:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3220.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:06] [ns_1@127.0.0.1:<0.3131.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:06] [ns_1@127.0.0.1:<0.3157.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:07] [ns_1@127.0.0.1:<0.3217.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:07] [ns_1@127.0.0.1:<0.3179.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:08] [ns_1@127.0.0.1:<0.3143.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:08] [ns_1@127.0.0.1:<0.3155.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:08] [ns_1@127.0.0.1:<0.3167.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:08] [ns_1@127.0.0.1:<0.3169.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3220.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3240.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:09] [ns_1@127.0.0.1:<0.3229.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3176.2>} [stats:error] [2012-03-26 3:05:09] [ns_1@127.0.0.1:<0.3193.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:10] [ns_1@127.0.0.1:<0.3185.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:10] [ns_1@127.0.0.1:<0.3176.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:10] [ns_1@127.0.0.1:<0.3187.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:11] [ns_1@127.0.0.1:<0.3247.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:11] 
[ns_1@127.0.0.1:<0.3205.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3240.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3263.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:12] [ns_1@127.0.0.1:<0.3195.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:12] [ns_1@127.0.0.1:<0.3199.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:13] [ns_1@127.0.0.1:<0.3260.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:13] [ns_1@127.0.0.1:<0.3221.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:14] [ns_1@127.0.0.1:<0.3207.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:14] [ns_1@127.0.0.1:<0.3209.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3263.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3279.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:15] [ns_1@127.0.0.1:<0.3272.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:15] [ns_1@127.0.0.1:<0.3231.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3060.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] 
[2012-03-26 3:05:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3288.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:16] [ns_1@127.0.0.1:<0.3223.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:16] [ns_1@127.0.0.1:<0.3176.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:16] [ns_1@127.0.0.1:<0.3225.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:17] [ns_1@127.0.0.1:<0.3286.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:18] [ns_1@127.0.0.1:<0.3233.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3279.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3301.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:18] [ns_1@127.0.0.1:<0.3237.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:19] [ns_1@127.0.0.1:<0.3241.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:19] [ns_1@127.0.0.1:<0.3298.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3176.2>} [stats:error] [2012-03-26 3:05:20] [ns_1@127.0.0.1:<0.3235.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:20] [ns_1@127.0.0.1:<0.3253.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:21] [ns_1@127.0.0.1:<0.3255.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3301.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 
3:05:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3321.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:21] [ns_1@127.0.0.1:<0.3310.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:22] [ns_1@127.0.0.1:<0.3251.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:22] [ns_1@127.0.0.1:<0.3266.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:22] [ns_1@127.0.0.1:<0.3176.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:23] [ns_1@127.0.0.1:<0.3268.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:23] [ns_1@127.0.0.1:<0.3326.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:24] [ns_1@127.0.0.1:<0.3264.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3321.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3340.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:24] [ns_1@127.0.0.1:<0.3276.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:25] [ns_1@127.0.0.1:<0.3282.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:25] [ns_1@127.0.0.1:<0.3337.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:26] [ns_1@127.0.0.1:<0.3274.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:26] [ns_1@127.0.0.1:<0.3291.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:27] [ns_1@127.0.0.1:<0.3293.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3340.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3358.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:27] [ns_1@127.0.0.1:<0.3351.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:28] [ns_1@127.0.0.1:<0.3289.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:28] [ns_1@127.0.0.1:<0.3304.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:29] [ns_1@127.0.0.1:<0.3306.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:29] [ns_1@127.0.0.1:<0.3363.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:29] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3176.2>} [stats:error] [2012-03-26 3:05:30] [ns_1@127.0.0.1:<0.3302.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3358.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3376.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:30] [ns_1@127.0.0.1:<0.3317.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:31] [ns_1@127.0.0.1:<0.3322.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:31] [ns_1@127.0.0.1:<0.3373.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:32] [ns_1@127.0.0.1:<0.3315.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:32] [ns_1@127.0.0.1:<0.3330.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:33] [ns_1@127.0.0.1:<0.3332.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: 
{noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3376.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3394.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:33] [ns_1@127.0.0.1:<0.3387.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:34] [ns_1@127.0.0.1:<0.3328.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:34] [ns_1@127.0.0.1:<0.3343.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:35] [ns_1@127.0.0.1:<0.3347.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:35] [ns_1@127.0.0.1:<0.3399.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3394.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3413.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:36] [ns_1@127.0.0.1:<0.3341.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:36] [ns_1@127.0.0.1:<0.3355.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:37] [ns_1@127.0.0.1:<0.3359.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:37] [ns_1@127.0.0.1:<0.3414.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:38] [ns_1@127.0.0.1:<0.3353.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:38] [ns_1@127.0.0.1:<0.3367.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3413.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3429.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:39] [ns_1@127.0.0.1:<0.3369.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3176.2>} [stats:error] [2012-03-26 3:05:39] [ns_1@127.0.0.1:<0.3424.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:40] [ns_1@127.0.0.1:<0.3365.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:40] [ns_1@127.0.0.1:<0.3381.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:41] [ns_1@127.0.0.1:<0.3383.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:41] [ns_1@127.0.0.1:<0.3436.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3429.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3449.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:42] [ns_1@127.0.0.1:<0.3379.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:42] [ns_1@127.0.0.1:<0.3391.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:43] [ns_1@127.0.0.1:<0.3395.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:43] [ns_1@127.0.0.1:<0.3450.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:44] [ns_1@127.0.0.1:<0.3389.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3449.2>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:47] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3463.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 3:05:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3463.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3471.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:48] [ns_1@127.0.0.1:<0.3401.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:49] [ns_1@127.0.0.1:<0.3403.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:49] [ns_1@127.0.0.1:<0.3410.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3176.2>} [stats:error] [2012-03-26 3:05:50] [ns_1@127.0.0.1:<0.3460.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:50] [ns_1@127.0.0.1:<0.3416.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:51] [ns_1@127.0.0.1:<0.3418.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3288.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3489.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-03-26 3:05:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated 
Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3471.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3491.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:51] [ns_1@127.0.0.1:<0.3422.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:52] [ns_1@127.0.0.1:<0.3472.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:52] [ns_1@127.0.0.1:<0.3426.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:53] [ns_1@127.0.0.1:<0.3430.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:53] [ns_1@127.0.0.1:<0.3434.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:54] [ns_1@127.0.0.1:<0.3485.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3491.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3509.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:54] [ns_1@127.0.0.1:<0.3440.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:55] [ns_1@127.0.0.1:<0.3442.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:55] [ns_1@127.0.0.1:<0.3446.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:56] [ns_1@127.0.0.1:<0.3498.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:56] [ns_1@127.0.0.1:<0.3452.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:57] [ns_1@127.0.0.1:<0.3454.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:05:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, 
[{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3509.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:05:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3528.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:05:57] [ns_1@127.0.0.1:<0.3458.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:58] [ns_1@127.0.0.1:<0.3510.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:58] [ns_1@127.0.0.1:<0.3474.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:05:58] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 3:05:58] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3176.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 3:05:59] [ns_1@127.0.0.1:<0.3476.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:05:59] [ns_1@127.0.0.1:<0.3480.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 3:05:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,756355,394406}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38407520}, {processes,10484960}, {processes_used,8874600}, {system,27922560}, {atom,1306681}, {atom_used,1284164}, {binary,631752}, {code,12859877}, {ets,2452400}]}, {system_stats, [{cpu_utilization_rate,25.44080604534005}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, 
{system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7377}, {memory_data,{4040077312,4012150784,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27520 kB\nBuffers: 65512 kB\nCached: 3523220 kB\nSwapCached: 0 kB\nActive: 313720 kB\nInactive: 3435780 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27520 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 52 kB\nWriteback: 0 kB\nAnonPages: 160780 kB\nMapped: 24872 kB\nSlab: 134300 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582004 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3607777280}, {buffered_memory,67084288}, {free_memory,28180480}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7372130,0}}, {context_switches,{1987720,0}}, {garbage_collection,{1055936,1517667856,0}}, {io,{{input,29716704},{output,78211987}}}, {reductions,{416115511,493274}}, {run_queue,0}, {runtime,{93350,160}}]}]}] [stats:error] [2012-03-26 3:06:00] [ns_1@127.0.0.1:<0.3523.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3528.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3565.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:00] [ns_1@127.0.0.1:<0.3487.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:01] [ns_1@127.0.0.1:<0.3492.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:01] [ns_1@127.0.0.1:<0.3496.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:02] [ns_1@127.0.0.1:<0.3535.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:02] [ns_1@127.0.0.1:<0.3500.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:03] [ns_1@127.0.0.1:<0.3502.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, 
{gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3565.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3583.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:03] [ns_1@127.0.0.1:<0.3506.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:04] [ns_1@127.0.0.1:<0.3568.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:04] [ns_1@127.0.0.1:<0.3559.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:04] [ns_1@127.0.0.1:<0.3512.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:05] [ns_1@127.0.0.1:<0.3516.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:05] [ns_1@127.0.0.1:<0.3521.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3583.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3603.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:06] [ns_1@127.0.0.1:<0.3578.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:06] [ns_1@127.0.0.1:<0.3525.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:07] [ns_1@127.0.0.1:<0.3529.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:07] [ns_1@127.0.0.1:<0.3533.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:08] [ns_1@127.0.0.1:<0.3590.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:08] [ns_1@127.0.0.1:<0.3537.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:08] [ns_1@127.0.0.1:<0.3570.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:08] [ns_1@127.0.0.1:<0.3580.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] 
[stats:error] [2012-03-26 3:06:08] [ns_1@127.0.0.1:<0.3592.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:08] [ns_1@127.0.0.1:<0.3608.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3603.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3627.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:09] [ns_1@127.0.0.1:<0.3554.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3559.2>} [stats:error] [2012-03-26 3:06:09] [ns_1@127.0.0.1:<0.3562.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:10] [ns_1@127.0.0.1:<0.3606.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:10] [ns_1@127.0.0.1:<0.3559.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:10] [ns_1@127.0.0.1:<0.3628.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:11] [ns_1@127.0.0.1:<0.3572.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:11] [ns_1@127.0.0.1:<0.3576.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3627.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:12] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3650.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:12] [ns_1@127.0.0.1:<0.3616.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:12] [ns_1@127.0.0.1:<0.3642.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: 
['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:13] [ns_1@127.0.0.1:<0.3584.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:13] [ns_1@127.0.0.1:<0.3588.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:14] [ns_1@127.0.0.1:<0.3618.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:14] [ns_1@127.0.0.1:<0.3655.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3650.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3666.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:15] [ns_1@127.0.0.1:<0.3600.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:15] [ns_1@127.0.0.1:<0.3604.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:16] [ns_1@127.0.0.1:<0.3620.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:16] [ns_1@127.0.0.1:<0.3559.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:16] [ns_1@127.0.0.1:<0.3669.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:17] [ns_1@127.0.0.1:<0.3612.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:18] [ns_1@127.0.0.1:<0.3614.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3666.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3687.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:18] [ns_1@127.0.0.1:<0.3622.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 3:06:19] [ns_1@127.0.0.1:<0.3679.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:19] [ns_1@127.0.0.1:<0.3634.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3559.2>} [stats:error] [2012-03-26 3:06:20] [ns_1@127.0.0.1:<0.3636.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:20] [ns_1@127.0.0.1:<0.3624.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:21] [ns_1@127.0.0.1:<0.3692.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3687.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3706.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:21] [ns_1@127.0.0.1:<0.3647.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:22] [ns_1@127.0.0.1:<0.3651.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:22] [ns_1@127.0.0.1:<0.3640.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:22] [ns_1@127.0.0.1:<0.3559.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:23] [ns_1@127.0.0.1:<0.3707.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:23] [ns_1@127.0.0.1:<0.3659.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:24] [ns_1@127.0.0.1:<0.3661.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3706.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3725.2>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:24] [ns_1@127.0.0.1:<0.3653.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:25] [ns_1@127.0.0.1:<0.3717.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:25] [ns_1@127.0.0.1:<0.3673.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3489.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:26] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3736.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:26] [ns_1@127.0.0.1:<0.3675.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:26] [ns_1@127.0.0.1:<0.3663.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:27] [ns_1@127.0.0.1:<0.3732.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3725.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3744.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:27] [ns_1@127.0.0.1:<0.3684.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:28] [ns_1@127.0.0.1:<0.3688.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:28] [ns_1@127.0.0.1:<0.3677.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:29] [ns_1@127.0.0.1:<0.3745.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:29] [ns_1@127.0.0.1:<0.3696.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:29] 
[ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3559.2>} [stats:error] [2012-03-26 3:06:30] [ns_1@127.0.0.1:<0.3701.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3744.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:30] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3762.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:30] [ns_1@127.0.0.1:<0.3690.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:31] [ns_1@127.0.0.1:<0.3755.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:31] [ns_1@127.0.0.1:<0.3711.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:32] [ns_1@127.0.0.1:<0.3713.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:32] [ns_1@127.0.0.1:<0.3703.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:33] [ns_1@127.0.0.1:<0.3770.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3762.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:33] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3781.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:33] [ns_1@127.0.0.1:<0.3722.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:34] [ns_1@127.0.0.1:<0.3726.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:34] [ns_1@127.0.0.1:<0.3715.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:35] [ns_1@127.0.0.1:<0.3782.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:35] 
[ns_1@127.0.0.1:<0.3737.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3781.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:36] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3800.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:36] [ns_1@127.0.0.1:<0.3739.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:36] [ns_1@127.0.0.1:<0.3728.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:37] [ns_1@127.0.0.1:<0.3797.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:37] [ns_1@127.0.0.1:<0.3749.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:38] [ns_1@127.0.0.1:<0.3751.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:38] [ns_1@127.0.0.1:<0.3741.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3800.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:39] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3816.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:39] [ns_1@127.0.0.1:<0.3809.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:39] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3559.2>} [stats:error] [2012-03-26 3:06:39] [ns_1@127.0.0.1:<0.3759.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:40] [ns_1@127.0.0.1:<0.3765.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:40] [ns_1@127.0.0.1:<0.3753.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 
3:06:41] [ns_1@127.0.0.1:<0.3821.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:41] [ns_1@127.0.0.1:<0.3774.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3816.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:42] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3836.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:42] [ns_1@127.0.0.1:<0.3776.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:42] [ns_1@127.0.0.1:<0.3768.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:44] [ns_1@127.0.0.1:<0.3788.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:44] [ns_1@127.0.0.1:<0.3778.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3836.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:45] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3848.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:45] [ns_1@127.0.0.1:<0.3833.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:45] [ns_1@127.0.0.1:<0.3786.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:46] [ns_1@127.0.0.1:<0.3803.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:47] [ns_1@127.0.0.1:<0.3790.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:47] [ns_1@127.0.0.1:<0.3855.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:48] [ns_1@127.0.0.1:<0.3801.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:48] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3848.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:48] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3868.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:48] [ns_1@127.0.0.1:<0.3813.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:49] [ns_1@127.0.0.1:<0.3805.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:49] [ns_1@127.0.0.1:<0.3865.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:49] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.3559.2>} [stats:error] [2012-03-26 3:06:50] [ns_1@127.0.0.1:<0.3811.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:50] [ns_1@127.0.0.1:<0.3827.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:51] [ns_1@127.0.0.1:<0.3817.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3868.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:51] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3887.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:51] [ns_1@127.0.0.1:<0.3877.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:52] [ns_1@127.0.0.1:<0.3823.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:52] [ns_1@127.0.0.1:<0.3839.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:53] [ns_1@127.0.0.1:<0.3829.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:53] [ns_1@127.0.0.1:<0.3892.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:54] 
[ns_1@127.0.0.1:<0.3837.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3887.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:54] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3905.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:54] [ns_1@127.0.0.1:<0.3845.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:55] [ns_1@127.0.0.1:<0.3851.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:55] [ns_1@127.0.0.1:<0.3902.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:56] [ns_1@127.0.0.1:<0.3843.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:56] [ns_1@127.0.0.1:<0.3859.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:57] [ns_1@127.0.0.1:<0.3861.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:06:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3905.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:06:57] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3923.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:06:57] [ns_1@127.0.0.1:<0.3916.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:58] [ns_1@127.0.0.1:<0.3857.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:58] [ns_1@127.0.0.1:<0.3871.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:06:58] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason {timeout, {gen_server, call, [{'ns_memcached-default', 'ns_1@127.0.0.1'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-03-26 3:06:58] 
[ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.3559.2> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@127.0.0.1'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 in call from ns_janitor_map_recoverer:read_existing_map/4 in call from ns_janitor:do_cleanup/3 ancestors: [<0.322.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.62.0>] messages: [] links: [<0.322.0>] dictionary: [{random_seed,{8712,26823,9965}}] trap_exit: false status: running heap_size: 28657 stack_size: 24 reductions: 1916 neighbours: [stats:error] [2012-03-26 3:06:59] [ns_1@127.0.0.1:<0.3873.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:06:59] [ns_1@127.0.0.1:<0.3928.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_doctor:info] [2012-03-26 3:06:59] [ns_1@127.0.0.1:ns_doctor:ns_doctor:handle_info:133] Current node statuses: [{'ns_1@127.0.0.1', [{last_heard,{1332,756415,426347}}, {outgoing_replications_safeness_level,[{"default",unknown}]}, {incoming_replications_conf_hashes,[{"default",[]}]}, {replication,[{"default",1.0}]}, {active_buckets,["default"]}, {ready_buckets,[]}, {local_tasks,[]}, {memory, [{total,38530768}, {processes,10569528}, {processes_used,8960184}, {system,27961240}, {atom,1306681}, {atom_used,1284164}, {binary,633712}, {code,12859877}, {ets,2483320}]}, {system_stats, [{cpu_utilization_rate,25.25}, {swap_total,6140452864}, {swap_used,102400}]}, {interesting_stats,[]}, {cluster_compatibility_version,1}, {version, [{public_key,"0.13"}, {ale,"8cffe61"}, {os_mon,"2.2.7"}, {couch_set_view,"1.2.0a-8bfbe08-git"}, {mnesia,"4.5"}, {inets,"5.7.1"}, {couch,"1.2.0a-8bfbe08-git"}, {kernel,"2.14.5"}, {crypto,"2.0.4"}, {ssl,"4.1.6"}, {sasl,"2.1.10"}, {ns_server,"2.0.0r-944-rel-enterprise"}, {mochiweb,"1.4.1"}, {ibrowse,"2.2.0"}, {oauth,"7d85d3ef"}, {stdlib,"1.17.5"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,7437}, {memory_data,{4040077312,4012150784,{<0.300.0>,601176}}}, {disk_data, [{"/",55007284,100},{"/boot",101086,21},{"/dev/shm",1972692,0}]}, {meminfo, <<"MemTotal: 3945388 kB\nMemFree: 27024 kB\nBuffers: 65624 kB\nCached: 3523360 kB\nSwapCached: 0 kB\nActive: 313764 kB\nInactive: 3436012 kB\nHighTotal: 0 kB\nHighFree: 0 kB\nLowTotal: 3945388 kB\nLowFree: 27024 kB\nSwapTotal: 5996536 kB\nSwapFree: 5996436 kB\nDirty: 56 kB\nWriteback: 0 kB\nAnonPages: 160788 kB\nMapped: 24872 kB\nSlab: 134332 kB\nPageTables: 6468 kB\nNFS_Unstable: 0 kB\nBounce: 0 kB\nCommitLimit: 7969228 kB\nCommitted_AS: 582004 kB\nVmallocTotal: 34359738367 kB\nVmallocUsed: 266140 kB\nVmallocChunk: 34359472007 kB\nHugePages_Total: 0\nHugePages_Free: 0\nHugePages_Rsvd: 0\nHugepagesize: 2048 kB\n">>}, {system_memory_data, [{system_total_memory,4040077312}, {free_swap,6140350464}, {total_swap,6140452864}, {cached_memory,3607920640}, {buffered_memory,67198976}, {free_memory,27672576}, {total_memory,4040077312}]}, {node_storage_conf, [{db_path,"/opt/couchbase/var/lib/couchdb"}, {index_path,"/opt/couchbase/var/lib/couchdb"}]}, {statistics, [{wall_clock,{7432163,1}}, {context_switches,{2000455,0}}, {garbage_collection,{1062427,1528479572,0}}, {io,{{input,29981938},{output,78822186}}}, {reductions,{418576099,561261}}, {run_queue,0}, 
{runtime,{94110,170}}]}]}] [stats:error] [2012-03-26 3:07:00] [ns_1@127.0.0.1:<0.3869.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3923.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:00] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3945.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:00] [ns_1@127.0.0.1:<0.3884.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3736.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3950.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:01] [ns_1@127.0.0.1:<0.3888.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:01] [ns_1@127.0.0.1:<0.3942.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:02] [ns_1@127.0.0.1:<0.3882.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:02] [ns_1@127.0.0.1:<0.3896.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:03] [ns_1@127.0.0.1:<0.3898.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3945.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:03] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3964.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:03] [ns_1@127.0.0.1:<0.3957.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:04] [ns_1@127.0.0.1:<0.3894.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:04] [ns_1@127.0.0.1:<0.3939.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:04] [ns_1@127.0.0.1:<0.3908.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:05] [ns_1@127.0.0.1:<0.3912.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:05] [ns_1@127.0.0.1:<0.3969.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3964.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:06] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.3985.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:06] [ns_1@127.0.0.1:<0.3906.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:06] [ns_1@127.0.0.1:<0.3920.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:07] [ns_1@127.0.0.1:<0.3924.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:07] [ns_1@127.0.0.1:<0.3986.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:08] [ns_1@127.0.0.1:<0.3918.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:08] [ns_1@127.0.0.1:<0.3932.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.3985.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4001.2>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:09] [ns_1@127.0.0.1:<0.3934.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:09] [ns_1@127.0.0.1:<0.3953.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:09] [ns_1@127.0.0.1:<0.3994.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:09] [ns_1@127.0.0.1:<0.3996.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:09] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:217] Janitor run exited for bucket "default" with reason shutdown [ns_server:info] [2012-03-26 3:07:09] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[]}] [ns_server:info] [2012-03-26 3:07:09] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 3:07:09] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 3:07:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 3:07:09] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [user:info] [2012-03-26 3:07:12] [ns_1@127.0.0.1:<0.4031.2>:menelaus_web_alerts_srv:global_alert:64] Approaching full disk warning. Usage of disk "/" on node "127.0.0.1" is around 100%. [ns_server:warn] [2012-03-26 3:07:14] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:325] Nodes ['ns_1@127.0.0.1'] failed to delete bucket "default" within expected time. 
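The repeating SUPERVISOR REPORT / PROGRESS REPORT pairs above are ordinary OTP restart behaviour: menelaus_sup declares menelaus_web_alerts_srv as a permanent worker, so each time the worker exits with a {noproc, {gen_server,call, ...}} reason the supervisor immediately starts a fresh copy, and the cycle repeats every few seconds as each new worker makes the same call. The noproc itself comes from gen_server:call targeting the registered name 'stats_reader-default', which the surrounding log suggests only exists while the per-bucket processes for "default" are running. A minimal Erlang sketch of that failure shape, not taken from the ns_server sources:

-module(noproc_demo).
-export([child_spec/0, call_missing_stats_reader/0]).

%% The classic {Id, MFA, Restart, Shutdown, Type, Modules} child spec,
%% mirroring the permanent / 5000 ms worker entry shown in the
%% PROGRESS REPORTs above.
child_spec() ->
    {menelaus_web_alerts_srv,
     {menelaus_web_alerts_srv, start_link, []},
     permanent, 5000, worker, [menelaus_web_alerts_srv]}.

%% Calling a registered name that is not (yet) present exits the caller
%% with {noproc, {gen_server, call, [ServerRef, Request]}}, the exact
%% Reason recorded in the SUPERVISOR REPORTs. The catch turns the exit
%% into an {'EXIT', Reason} return value so it can be inspected.
call_missing_stats_reader() ->
    catch gen_server:call({'stats_reader-default', node()},
                          {latest, minute, 1}).

Because the child is permanent, the supervisor keeps restarting it until either the stats reader becomes available or the supervisor's restart intensity is exceeded, which is why the same pair of reports recurs throughout this stretch of the log.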
[ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:idle:331] Restarting moxi on nodes [] [menelaus:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:<0.3930.2>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:<0.3988.2>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,[]}]}]}] [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_isasl_sync:ns_config_isasl_sync:writeSASLConf:129] Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/data/isasl.pw" [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_events:ns_config_log:handle_event:60] config change: buckets -> [{configs,[{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,2153775104}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@127.0.0.1']}]}]}] [stats:error] [2012-03-26 3:07:14] [ns_1@127.0.0.1:<0.3998.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_events:ns_node_disco_conf_events:handle_event:56] ns_node_disco_conf_events config all [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:181] Pushing config [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:ns_config_rep:ns_config_rep:handle_info:183] Pushing config done [ns_server:info] [2012-03-26 3:07:14] [ns_1@127.0.0.1:<0.18858.0>:ns_port_server:log:166] moxi<0.18858.0>: 2012-03-26 03:07:16: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.18858.0>: "name": "default", moxi<0.18858.0>: "nodeLocator": "vbucket", moxi<0.18858.0>: "saslPassword": "", moxi<0.18858.0>: "nodes": [{ moxi<0.18858.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.18858.0>: "replication": 0, moxi<0.18858.0>: "clusterMembership": "active", moxi<0.18858.0>: "status": "warmup", moxi<0.18858.0>: "thisNode": true, moxi<0.18858.0>: "hostname": "127.0.0.1:8091", moxi<0.18858.0>: "clusterCompatibility": 1, moxi<0.18858.0>: "version": "2.0.0r-944-rel-enterprise", moxi<0.18858.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.18858.0>: "ports": { moxi<0.18858.0>: "proxy": 11211, moxi<0.18858.0>: "direct": 11210 moxi<0.18858.0>: } moxi<0.18858.0>: }], moxi<0.18858.0>: "vBucketServerMap": { moxi<0.18858.0>: "hashAlgorithm": "CRC", moxi<0.18858.0>: "numReplicas": 1, moxi<0.18858.0>: "serverList": ["127.0.0.1:11210"], moxi<0.18858.0>: "vBucketMap": [] moxi<0.18858.0>: } moxi<0.18858.0>: }) [stats:error] [2012-03-26 3:07:14] [ns_1@127.0.0.1:<0.3951.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] 
[2012-03-26 3:07:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.4001.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4054.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:15] [ns_1@127.0.0.1:<0.4036.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:15] [ns_1@127.0.0.1:<0.4037.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:16] [ns_1@127.0.0.1:<0.4051.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:17] [ns_1@127.0.0.1:<0.3961.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:17] [ns_1@127.0.0.1:<0.4061.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:18] [ns_1@127.0.0.1:<0.4038.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.4054.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:18] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4074.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:18] [ns_1@127.0.0.1:<0.4065.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:19] [ns_1@127.0.0.1:<0.3973.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:19] [ns_1@127.0.0.1:<0.4043.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:19] [ns_1@127.0.0.1:<0.4071.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:19] [ns_1@127.0.0.1:<0.322.0>:ns_orchestrator:handle_info:208] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.4043.2>} [stats:error] [2012-03-26 3:07:20] [ns_1@127.0.0.1:<0.4039.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] 
[2012-03-26 3:07:20] [ns_1@127.0.0.1:<0.4079.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:21] [ns_1@127.0.0.1:<0.3990.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.4074.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:21] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4096.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:21] [ns_1@127.0.0.1:<0.4085.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:22] [ns_1@127.0.0.1:<0.4046.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:22] [ns_1@127.0.0.1:<0.4093.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:23] [ns_1@127.0.0.1:<0.4002.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:23] [ns_1@127.0.0.1:<0.4101.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:24] [ns_1@127.0.0.1:<0.4063.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.4096.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:24] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4114.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:24] [ns_1@127.0.0.1:<0.4105.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:25] [ns_1@127.0.0.1:<0.4004.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:25] [ns_1@127.0.0.1:<0.4043.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:25] [ns_1@127.0.0.1:<0.4111.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:26] 
[ns_1@127.0.0.1:<0.4077.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:26] [ns_1@127.0.0.1:<0.4117.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:27] [ns_1@127.0.0.1:<0.4006.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@127.0.0.1'}, {latest,minute,1}]}} Offender: [{pid,<0.4114.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:27] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4133.2>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-03-26 3:07:27] [ns_1@127.0.0.1:<0.4125.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [stats:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:<0.4091.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:<0.28975.1>:single_bucket_sup:top_loop:24] per-bucket supervisor for "default" died with reason shutdown [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "test_non_default"}, {single_bucket_sup, start_link, ["test_non_default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "test_non_default"}, <0.4142.2>, supervisor, [single_bucket_sup]} [error_logger:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.4142.2>}, {name,{per_bucket_sup,"test_non_default"}}, {mfargs, {single_bucket_sup,start_link, ["test_non_default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:<0.4043.2>:ns_janitor:wait_for_memcached:278] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {shutdown, {gen_server,call,['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.3950.2>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.4146.2>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:'ns_memcached-test_non_default':ns_memcached:ensure_bucket:717] Unable to get config for bucket "test_non_default": {error, function_clause, [{proplists, get_value, [ram_quota, undefined, undefined]}, {ns_bucket, config_string, 1}, {ns_memcached, ensure_bucket, 2}, {ns_memcached, init, 1}, {gen_server, init_it, 6}, {proc_lib, init_p_do_apply, 3}]} [error_logger:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-test_non_default'} started: [{pid,<0.4145.2>}, {name,{ns_memcached,stats,"test_non_default"}}, {mfargs, {ns_memcached,start_link, [{"test_non_default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [user:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:'ns_memcached-test_non_default':ns_memcached:terminate:348] Shutting down bucket "test_non_default" on 'ns_1@127.0.0.1' for deletion [error_logger:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_memcached:init/1 pid: <0.4147.2> registered_name: [] exception exit: {{badmatch, {memcached_error,key_enoent,<<"Engine not found">>}}, [{ns_memcached,init,1}, {gen_server,init_it,6}, {proc_lib,init_p_do_apply,3}]} in function gen_server:init_it/6 ancestors: ['ns_memcached_sup-test_non_default', 'single_bucket_sup-test_non_default',<0.4142.2>] messages: [] links: [<0.4144.2>,#Port<0.32638>] dictionary: [] trap_exit: false status: running heap_size: 10946 stack_size: 24 reductions: 1220 neighbours: [ns_server:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:'ns_memcached-test_non_default':ns_memcached:terminate:357] Failed to delete bucket "test_non_default": {error, {badmatch, {memcached_error, key_enoent, <<"Not found.">>}}} [error_logger:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'ns_memcached_sup-test_non_default'} Context: start_error Reason: {{badmatch, {memcached_error,key_enoent,<<"Engine not found">>}}, [{ns_memcached,init,1}, {gen_server,init_it,6}, {proc_lib,init_p_do_apply,3}]} Offender: [{pid,undefined}, {name,{ns_memcached,data,"test_non_default"}}, {mfargs, {ns_memcached,start_link,[{"test_non_default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "default"}, {single_bucket_sup, start_link, ["default"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "default"}, <0.4148.2>, supervisor, [single_bucket_sup]} [error_logger:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'single_bucket_sup-test_non_default'} Context: start_error Reason: shutdown Offender: [{pid,undefined}, {name,{ns_memcached_sup,"test_non_default"}}, {mfargs,{ns_memcached_sup,start_link,["test_non_default"]}}, 
{restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,ns_bucket_sup} Context: shutdown_error Reason: {{badmatch,{error,shutdown}}, [{single_bucket_sup,'-start_link/1-fun-0-',2}]} Offender: [{pid,<0.4142.2>}, {name,{per_bucket_sup,"test_non_default"}}, {mfargs,{single_bucket_sup,start_link,["test_non_default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.4148.2>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_msg:76] Error in process <0.4142.2> on node 'ns_1@127.0.0.1' with exit value: {{badmatch,{error,shutdown}},[{single_bucket_sup,'-start_link/1-fun-0-',2}]} [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.366.0>: Had to wait 0h:20m:17s for shutdown memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.366.0>: Connected to mccouch: "localhost:11213" memcached<0.366.0>: open_db() failed to open database file, /opt/couchbase/var/lib/couchdb/default/0.couch.1 memcached<0.366.0>: Failed to open database, name=/opt/couchbase/var/lib/couchdb/default/0.couch.1 error=no such file [stats:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:<0.4130.2>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:86] Starting new child: {{per_bucket_sup, "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d"}, {single_bucket_sup, start_link, ["uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d"]}, permanent, infinity, supervisor, [single_bucket_sup]} [ns_server:info] [2012-03-26 3:07:28] [ns_1@127.0.0.1:ns_bucket_worker:ns_bucket_sup:update_childs:93] Stopping child for dead bucket: {{per_bucket_sup, "uppercase_a4de4b5c-dc82-4a12-b006-95641acd410d"}, <0.4160.2>, supervisor, [single_bucket_sup]} [error_logger:error] [2012-03-26 3:07:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'ns_memcached_sup-default'} Context: start_error Reason: {{badmatch,{error,closed}}, [{mc_client_binary,cmd_binary_vocal_recv,5}, {mc_client_binary,create_bucket,4}, {ns_memcached,ensure_bucket,2}, [error_logger:info] [2012-03-26 3:41:01] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================INFO REPORT========================= alarm_handler: {clear,{disk_almost_full,"/"}}
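The {timeout, {gen_server,call, [..., 30000]}} reasons recorded earlier for the janitor (list_vbuckets_prevstate) and for hot_keys_keeper (topkeys) are the generic shape gen_server:call/3 produces when the callee does not answer within the given number of milliseconds: the caller is exited with {timeout, {gen_server, call, [ServerRef, Request, Timeout]}}. A small self-contained Erlang sketch of that mechanism, using a hypothetical module rather than the ns_memcached code:

-module(call_timeout_demo).
-behaviour(gen_server).
-export([start_link/0, slow_call/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

init([]) -> {ok, state}.

%% Deliberately never reply, so the caller's timeout fires instead.
handle_call(slow_request, _From, State) ->
    {noreply, State}.

handle_cast(_Msg, State) -> {noreply, State}.
handle_info(_Info, State) -> {noreply, State}.
terminate(_Reason, _State) -> ok.
code_change(_OldVsn, State, _Extra) -> {ok, State}.

%% After TimeoutMs milliseconds the call exits the caller with
%% {timeout, {gen_server, call, [call_timeout_demo, slow_request, TimeoutMs]}};
%% the catch converts that exit into an {'EXIT', Reason} return value.
slow_call(TimeoutMs) ->
    catch gen_server:call(?MODULE, slow_request, TimeoutMs).

Starting the server and then calling call_timeout_demo:slow_call(30000) reproduces, after 30 seconds, the same reason term the orchestrator logged when 'ns_memcached-default' stopped answering within its 30000 ms budget.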